Initial commit of integrated agent system

This commit is contained in:
cillin
2026-03-30 17:46:44 +08:00
commit 0fa413380c
337 changed files with 75268 additions and 0 deletions

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
"""
Agents package - EvoAgent architecture for trading system.
Exports:
- EvoAgent: Next-generation agent with workspace support
- ToolGuardMixin: Tool call approval/denial flow
- CommandHandler: System command handling
- AgentFactory: Dynamic agent creation and management
- ModelConfig: Model configuration consumed by AgentFactory
- WorkspaceManager: Legacy name for the persistent workspace registry
- WorkspaceRegistry: Explicit run-time-agnostic workspace registry
- WorkspaceConfig: Workspace configuration container
- RunWorkspaceManager: Run-scoped workspace asset manager
- AgentRegistry / AgentInfo / get_registry / reset_registry: Central agent registry
- Legacy compatibility: AnalystAgent, PMAgent, RiskAgent
- Compatibility layer: LegacyAgentAdapter, adapt_agent, adapt_agents, is_legacy_agent
"""
# New EvoAgent architecture (from agent_core.py)
from .agent_core import EvoAgent, ToolGuardMixin, CommandHandler
from .factory import AgentFactory, ModelConfig
from .workspace import WorkspaceManager, WorkspaceRegistry, WorkspaceConfig
from .workspace_manager import RunWorkspaceManager
from .registry import AgentRegistry, AgentInfo, get_registry, reset_registry
# Legacy agents (backward compatibility)
from .analyst import AnalystAgent
from .portfolio_manager import PMAgent
from .risk_manager import RiskAgent
# Compatibility layer
from .compat import LegacyAgentAdapter, adapt_agent, adapt_agents, is_legacy_agent
# Explicit public API; keep in sync with the imports above.
__all__ = [
    # New architecture
    "EvoAgent",
    "ToolGuardMixin",
    "CommandHandler",
    "AgentFactory",
    "ModelConfig",
    "WorkspaceManager",
    "WorkspaceRegistry",
    "WorkspaceConfig",
    "RunWorkspaceManager",
    "AgentRegistry",
    "AgentInfo",
    "get_registry",
    "reset_registry",
    # Legacy compatibility
    "AnalystAgent",
    "PMAgent",
    "RiskAgent",
    # Compatibility layer
    "LegacyAgentAdapter",
    "adapt_agent",
    "adapt_agents",
    "is_legacy_agent",
]

View File

@@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
"""
Compatibility layer for legacy imports.
This module re-exports the newer base implementations so existing import
paths (`from backend.agents.agent_core import EvoAgent`) continue to work while
centralizing the actual logic in `backend.agents.base.evo_agent`.
"""
from .base.command_handler import CommandHandler
from .base.evo_agent import EvoAgent
from .base.tool_guard import ToolGuardMixin
# Pure re-exports; no logic lives in this module.
__all__ = [
    "EvoAgent",
    "ToolGuardMixin",
    "CommandHandler",
]

View File

@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
"""Per-agent run-scoped workspace configuration helpers."""
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional
import yaml
@dataclass(frozen=True)
class AgentWorkspaceConfig:
    """Structured view over an agent's ``agent.yaml`` settings.

    Loaded from ``runs/<config>/agents/<agent>/agent.yaml``; the raw mapping
    is kept in :attr:`values` and exposed through normalizing properties.
    """
    values: Dict[str, Any] = field(default_factory=dict)

    @property
    def prompt_files(self) -> Optional[List[str]]:
        """Trimmed ``prompt_files`` entries, or ``None`` when unset/empty."""
        raw = self.values.get("prompt_files")
        if not isinstance(raw, list):
            return None
        cleaned: List[str] = []
        for item in raw:
            if isinstance(item, str) and item.strip():
                cleaned.append(item.strip())
        return cleaned or None

    @property
    def enabled_skills(self) -> List[str]:
        """Skill names explicitly enabled for this agent."""
        return _normalized_string_list(self.values.get("enabled_skills"))

    @property
    def disabled_skills(self) -> List[str]:
        """Skill names explicitly disabled for this agent."""
        return _normalized_string_list(self.values.get("disabled_skills"))

    @property
    def active_tool_groups(self) -> Optional[List[str]]:
        """Tool groups to activate, or ``None`` to fall back to defaults."""
        normalized = _normalized_string_list(self.values.get("active_tool_groups"))
        return normalized if normalized else None

    @property
    def disabled_tool_groups(self) -> List[str]:
        """Tool groups explicitly disabled for this agent."""
        return _normalized_string_list(self.values.get("disabled_tool_groups"))

    def get(self, key: str, default: Any = None) -> Any:
        """Dict-style accessor into the raw configuration mapping."""
        return self.values.get(key, default)
def _normalized_string_list(raw: Any) -> List[str]:
if not isinstance(raw, list):
return []
seen: List[str] = []
for item in raw:
if not isinstance(item, str):
continue
value = item.strip()
if value and value not in seen:
seen.append(value)
return seen
def load_agent_workspace_config(path: Path) -> AgentWorkspaceConfig:
    """Read ``agent.yaml`` at *path*.

    Missing files, blank files, and YAML documents that are not mappings all
    yield an empty :class:`AgentWorkspaceConfig`.
    """
    if not (path.exists() and path.is_file()):
        return AgentWorkspaceConfig()
    text = path.read_text(encoding="utf-8").strip()
    if not text:
        return AgentWorkspaceConfig()
    data = yaml.safe_load(text) or {}
    return AgentWorkspaceConfig(values=data if isinstance(data, dict) else {})

139
backend/agents/analyst.py Normal file
View File

@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
"""
Analyst Agent - Based on AgentScope ReActAgent
Performs analysis using tools and LLM
"""
from typing import Any, Dict, Optional
from agentscope.agent import ReActAgent
from agentscope.memory import InMemoryMemory, LongTermMemoryBase
from agentscope.message import Msg
from ..config.constants import ANALYST_TYPES
from ..utils.progress import progress
from .prompt_factory import build_agent_system_prompt, clear_prompt_factory_cache
class AnalystAgent(ReActAgent):
    """
    Analyst Agent - Uses LLM for tool selection and analysis.

    Inherits from AgentScope's ReActAgent; adds per-ticker progress reporting
    around ``reply`` and runtime reloading of the toolkit/system prompt.
    """
    def __init__(
        self,
        analyst_type: str,
        toolkit: Any,
        model: Any,
        formatter: Any,
        agent_id: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
        long_term_memory: Optional[LongTermMemoryBase] = None,
    ):
        """
        Initialize Analyst Agent.

        Args:
            analyst_type: Type of analyst (must be a key of ANALYST_TYPES)
            toolkit: AgentScope Toolkit instance
            model: LLM model instance
            formatter: Message formatter instance
            agent_id: Agent ID (defaults to ``analyst_type`` itself)
            config: Configuration dictionary
            long_term_memory: Optional ReMeTaskLongTermMemory instance

        Raises:
            ValueError: If ``analyst_type`` is not a known analyst type.
        """
        if analyst_type not in ANALYST_TYPES:
            raise ValueError(
                f"Unknown analyst type: {analyst_type}. "
                f"Must be one of: {list(ANALYST_TYPES.keys())}",
            )
        # NOTE(review): object.__setattr__ (instead of plain assignment) is
        # used before super().__init__() runs — presumably to bypass attribute
        # interception in the ReActAgent base class; confirm against the
        # AgentScope version in use.
        object.__setattr__(self, "analyst_type_key", analyst_type)
        object.__setattr__(
            self,
            "analyst_persona",
            ANALYST_TYPES[analyst_type]["display_name"],
        )
        if agent_id is None:
            agent_id = analyst_type
        object.__setattr__(self, "agent_id", agent_id)
        object.__setattr__(self, "config", config or {})
        object.__setattr__(self, "toolkit", toolkit)
        # The system prompt depends on agent_id/config/toolkit set just above.
        sys_prompt = self._load_system_prompt()
        kwargs = {
            "name": agent_id,
            "sys_prompt": sys_prompt,
            "model": model,
            "formatter": formatter,
            "toolkit": toolkit,
            "memory": InMemoryMemory(),
            "max_iters": 10,  # cap on ReAct reasoning/tool-call iterations
        }
        if long_term_memory:
            kwargs["long_term_memory"] = long_term_memory
            kwargs["long_term_memory_mode"] = "static_control"
        super().__init__(**kwargs)
    def _load_system_prompt(self) -> str:
        """Build the system prompt from the current run's prompt assets."""
        return build_agent_system_prompt(
            agent_id=self.agent_id,
            config_name=self.config.get("config_name", "default"),
            toolkit=self.toolkit,
        )
    async def reply(self, x: Optional[Msg] = None) -> Msg:
        """
        Override reply method to add progress tracking.

        Args:
            x: Input message (content must be str); when ``x.metadata``
               carries a "tickers" entry it is used to report progress.
        Returns:
            Response message (content is str)
        """
        ticker = None
        if x and hasattr(x, "metadata") and x.metadata:
            ticker = x.metadata.get("tickers")
        if ticker:
            progress.update_status(
                self.name,
                ticker,
                f"Starting {self.analyst_persona} analysis",
            )
        result = await super().reply(x)
        if ticker:
            progress.update_status(
                self.name,
                ticker,
                "Analysis completed",
            )
        return result
    def reload_runtime_assets(self, active_skill_dirs: Optional[list] = None) -> None:
        """Reload toolkit and system prompt from current run assets."""
        # Local import — presumably avoids a circular import at module load
        # time; confirm before hoisting to the top of the file.
        from .toolkit_factory import create_agent_toolkit
        clear_prompt_factory_cache()
        self.toolkit = create_agent_toolkit(
            self.agent_id,
            self.config.get("config_name", "default"),
            active_skill_dirs=active_skill_dirs,
        )
        self._apply_runtime_sys_prompt(self._load_system_prompt())
    def _apply_runtime_sys_prompt(self, sys_prompt: str) -> None:
        """Update the prompt used by future turns and the cached system msg."""
        self._sys_prompt = sys_prompt
        # NOTE(review): assumes memory.content yields (msg, marks) pairs and
        # that the first system-role message is the cached system prompt —
        # confirm against the memory implementation in use.
        for msg, _marks in self.memory.content:
            if getattr(msg, "role", None) == "system":
                msg.content = sys_prompt
                break

View File

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""Base agent module for 大时代.

Re-exports command handling, evaluation hooks and skill-adaptation hooks for
the agent base layer.
"""
# Command handlers (from command_handler.py)
from .command_handler import (
    AgentCommandDispatcher,
    CommandContext,
    CommandHandler,
    CommandResult,
    create_command_dispatcher,
)
# Evaluation hooks (from evaluation_hook.py)
from .evaluation_hook import (
    EvaluationHook,
    EvaluationCollector,
    MetricType,
    EvaluationMetric,
    EvaluationResult,
    parse_evaluation_hooks,
)
# Skill adaptation hooks (from skill_adaptation_hook.py)
from .skill_adaptation_hook import (
    AdaptationAction,
    AdaptationThreshold,
    AdaptationEvent,
    SkillAdaptationHook,
    AdaptationManager,
    get_adaptation_manager,
)
__all__ = [
    # Command handling
    "AgentCommandDispatcher",
    "CommandContext",
    "CommandHandler",
    "CommandResult",
    "create_command_dispatcher",
    # Evaluation hooks
    "EvaluationHook",
    "EvaluationCollector",
    "MetricType",
    "EvaluationMetric",
    "EvaluationResult",
    "parse_evaluation_hooks",
    # Skill adaptation hooks
    "AdaptationAction",
    "AdaptationThreshold",
    "AdaptationEvent",
    "SkillAdaptationHook",
    "AdaptationManager",
    "get_adaptation_manager",
]

View File

@@ -0,0 +1,543 @@
# -*- coding: utf-8 -*-
"""Agent command handler for system commands.
This module handles system commands like /save, /compact, /skills, /reload, etc.
参考CoPaw设计为EvoAgent提供命令处理能力。
"""
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Protocol
if TYPE_CHECKING:
from .agent import EvoAgent
logger = logging.getLogger(__name__)
@dataclass
class CommandResult:
    """Result of executing a system command (/save, /skills, ...)."""
    success: bool  # True when the command completed without error
    message: str  # human-readable (possibly multi-line) status text
    data: Dict[str, Any] = field(default_factory=dict)  # structured payload for programmatic use
class CommandContext:
    """Per-invocation context handed to a CommandHandler."""
    def __init__(self, agent: "EvoAgent", raw_query: str, args: str = ""):
        self.agent = agent  # the EvoAgent the command operates on
        self.raw_query = raw_query  # full original input, including the leading "/"
        self.args = args  # argument substring after the (sub)command name
        # getattr with defaults so handlers never crash on partially
        # initialised agents.
        self.config_name = getattr(agent, "config_name", "default")
        self.agent_id = getattr(agent, "agent_id", "unknown")
class CommandHandler(ABC):
    """Abstract base class for command handlers."""
    @abstractmethod
    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Execute the command described by *ctx* and report the outcome."""
        pass
class SaveCommandHandler(CommandHandler):
    """Handle ``/save <message>`` - append a note to the agent's MEMORY.md."""

    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Append ``ctx.args`` to MEMORY.md under a timestamp heading."""
        note = ctx.args.strip()
        if not note:
            return CommandResult(
                success=False,
                message="Usage: /save <message>\n请提供要保存的内容。"
            )
        try:
            target = self._get_memory_path(ctx)
            target.parent.mkdir(parents=True, exist_ok=True)
            stamp = self._get_timestamp()
            with open(target, "a", encoding="utf-8") as fh:
                fh.write(f"\n## {stamp}\n\n{note}\n")
            return CommandResult(
                success=True,
                message=f"✅ 内容已保存到 MEMORY.md\n- 路径: {target}\n- 长度: {len(note)} 字符",
                data={"path": str(target), "length": len(note)}
            )
        except Exception as e:
            logger.error(f"Failed to save to MEMORY.md: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 保存失败: {str(e)}"
            )

    def _get_memory_path(self, ctx: CommandContext) -> Path:
        """Resolve MEMORY.md inside the agent's asset directory."""
        from backend.agents.skills_manager import SkillsManager
        manager = SkillsManager()
        return manager.get_agent_asset_dir(ctx.config_name, ctx.agent_id) / "MEMORY.md"

    def _get_timestamp(self) -> str:
        """Current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
        from datetime import datetime
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
class CompactCommandHandler(CommandHandler):
    """Handle ``/compact`` - compress the agent's conversation memory."""
    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Summarize current memory via the agent's memory manager."""
        try:
            agent = ctx.agent
            # Memory compaction is optional; bail out cleanly when the agent
            # was built without a memory_manager.
            memory_manager = getattr(agent, "memory_manager", None)
            if memory_manager is None:
                return CommandResult(
                    success=False,
                    message="❌ Memory Manager 未启用\n\n- 记忆压缩功能不可用\n- 请在配置中启用 memory_manager"
                )
            messages = await self._get_messages(agent)
            if not messages:
                return CommandResult(
                    success=False,
                    message="⚠️ 没有可压缩的消息\n\n- 当前记忆为空\n- 无需执行压缩"
                )
            compact_content = await memory_manager.compact_memory(messages)
            await self._update_compressed_summary(agent, compact_content)
            return CommandResult(
                success=True,
                message=f"✅ 记忆压缩完成\n\n- 压缩了 {len(messages)} 条消息\n- 摘要长度: {len(compact_content)} 字符",
                data={"message_count": len(messages), "summary_length": len(compact_content)}
            )
        except Exception as e:
            logger.error(f"Failed to compact memory: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 压缩失败: {str(e)}"
            )
    async def _get_messages(self, agent: "EvoAgent") -> List[Any]:
        """Fetch the agent's memory messages (empty list when unavailable)."""
        memory = getattr(agent, "memory", None)
        if memory is None:
            return []
        return await memory.get_memory() if hasattr(memory, "get_memory") else []
    async def _update_compressed_summary(self, agent: "EvoAgent", content: str) -> None:
        """Store the compacted summary back on the memory, when supported."""
        memory = getattr(agent, "memory", None)
        if memory and hasattr(memory, "update_compressed_summary"):
            await memory.update_compressed_summary(content)
class SkillsListCommandHandler(CommandHandler):
    """Handle ``/skills list`` - show active skills and catalog size."""
    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Render the agent's active skills and available-skill count."""
        try:
            from backend.agents.skills_manager import SkillsManager
            sm = SkillsManager()
            active_skills = sm.list_active_skill_metadata(ctx.config_name, ctx.agent_id)
            catalog = sm.list_agent_skill_catalog(ctx.config_name, ctx.agent_id)
            lines = ["📋 技能列表", ""]
            if active_skills:
                lines.append("✅ 已激活技能:")
                for skill in active_skills:
                    # Fix: append the ellipsis only when the description was
                    # actually truncated (it used to be unconditional, so a
                    # 3-char description rendered as "abc...").
                    desc = skill.description
                    summary = desc[:50] + ("..." if len(desc) > 50 else "")
                    lines.append(f"{skill.name} - {summary}")
            else:
                lines.append("⚠️ 当前没有激活的技能")
            lines.append("")
            lines.append(f"📚 可用技能总数: {len(catalog)}")
            lines.append("💡 使用 /skills enable <name> 启用技能")
            return CommandResult(
                success=True,
                message="\n".join(lines),
                data={
                    "active_count": len(active_skills),
                    "catalog_count": len(catalog),
                    # NOTE(review): message text uses skill.name while the
                    # data payload uses s.skill_name — confirm both attributes
                    # exist on the metadata objects.
                    "active": [s.skill_name for s in active_skills]
                }
            )
        except Exception as e:
            logger.error(f"Failed to list skills: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 获取技能列表失败: {str(e)}"
            )
class SkillsEnableCommandHandler(CommandHandler):
    """Handle ``/skills enable <name>`` - turn a skill on for this agent."""

    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Record the named skill in the agent's enable overrides."""
        name = ctx.args.strip()
        if not name:
            return CommandResult(
                success=False,
                message="Usage: /skills enable <skill_name>\n请提供技能名称。"
            )
        try:
            from backend.agents.skills_manager import SkillsManager
            overrides = SkillsManager().update_agent_skill_overrides(
                ctx.config_name,
                ctx.agent_id,
                enable=[name]
            )
            enabled = ", ".join(overrides['enabled_skills'])
            return CommandResult(
                success=True,
                message=f"✅ 技能已启用: {name}\n\n已启用技能: {enabled}",
                data=overrides
            )
        except Exception as e:
            logger.error(f"Failed to enable skill: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 启用技能失败: {str(e)}"
            )
class SkillsDisableCommandHandler(CommandHandler):
    """Handle ``/skills disable <name>`` - turn a skill off for this agent."""

    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Record the named skill in the agent's disable overrides."""
        name = ctx.args.strip()
        if not name:
            return CommandResult(
                success=False,
                message="Usage: /skills disable <skill_name>\n请提供技能名称。"
            )
        try:
            from backend.agents.skills_manager import SkillsManager
            overrides = SkillsManager().update_agent_skill_overrides(
                ctx.config_name,
                ctx.agent_id,
                disable=[name]
            )
            disabled = ", ".join(overrides['disabled_skills'])
            return CommandResult(
                success=True,
                message=f"✅ 技能已禁用: {name}\n\n已禁用技能: {disabled}",
                data=overrides
            )
        except Exception as e:
            logger.error(f"Failed to disable skill: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 禁用技能失败: {str(e)}"
            )
class SkillsInstallCommandHandler(CommandHandler):
    """Handle ``/skills install <name>`` - copy a skill into this agent."""
    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Resolve, validate and copy the named skill into the agent's
        installed-skills directory, replacing any previous install."""
        skill_name = ctx.args.strip()
        if not skill_name:
            return CommandResult(
                success=False,
                message="Usage: /skills install <skill_name>\n请提供技能名称。"
            )
        try:
            from backend.agents.skills_manager import SkillsManager
            from backend.agents.skill_loader import load_skill_from_dir
            sm = SkillsManager()
            # Locate the skill's source directory (customized wins over builtin).
            source_dir = self._resolve_skill_source(sm, skill_name)
            if not source_dir:
                return CommandResult(
                    success=False,
                    message=f"❌ 技能未找到: {skill_name}\n\n请检查技能名称是否正确,或技能是否存在于 builtin/customized 目录。"
                )
            # Load and validate the skill definition.
            skill_info = load_skill_from_dir(source_dir)
            if not skill_info:
                return CommandResult(
                    success=False,
                    message=f"❌ 技能加载失败: {skill_name}\n\n技能格式可能不正确。"
                )
            # Install into the agent's installed directory; an existing copy
            # is deleted first so the install is a clean replacement.
            installed_root = sm.get_agent_installed_root(ctx.config_name, ctx.agent_id)
            target_dir = installed_root / skill_name
            import shutil
            if target_dir.exists():
                shutil.rmtree(target_dir)
            shutil.copytree(source_dir, target_dir)
            return CommandResult(
                success=True,
                message=f"✅ 技能已安装: {skill_name}\n\n- 名称: {skill_info.get('name', skill_name)}\n- 版本: {skill_info.get('version', 'unknown')}\n- 路径: {target_dir}",
                data={"skill_name": skill_name, "target_dir": str(target_dir)}
            )
        except Exception as e:
            logger.error(f"Failed to install skill: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 安装技能失败: {str(e)}"
            )
    def _resolve_skill_source(self, sm: "SkillsManager", skill_name: str) -> Optional[Path]:
        """Find the skill's source directory; customized_root is checked
        before builtin_root, and a directory only counts if it has SKILL.md."""
        for root in [sm.customized_root, sm.builtin_root]:
            candidate = root / skill_name
            if candidate.exists() and (candidate / "SKILL.md").exists():
                return candidate
        return None
class ReloadCommandHandler(CommandHandler):
    """Handle ``/reload`` - reload agent config and clear the skill cache."""
    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Reload configuration and wipe the cached active skills.

        NOTE(review): this deletes every subdirectory under the agent's
        active-skills root; skills are presumably re-synced on next use —
        confirm against the SkillsManager sync flow.
        """
        try:
            agent = ctx.agent
            # Reload agent configuration, when the agent supports it.
            if hasattr(agent, "reload_config"):
                await agent.reload_config()
            # Reload skills
            from backend.agents.skills_manager import SkillsManager
            sm = SkillsManager()
            # Refresh skill synchronization
            active_root = sm.get_agent_active_root(ctx.config_name, ctx.agent_id)
            if active_root.exists():
                # Clear the cache to force a fresh reload.
                import shutil
                for item in active_root.iterdir():
                    if item.is_dir():
                        shutil.rmtree(item)
            return CommandResult(
                success=True,
                message="✅ 配置已重新加载\n\n- Agent配置已刷新\n- 技能缓存已清除\n- 请重启对话以应用所有更改",
                data={"config_name": ctx.config_name, "agent_id": ctx.agent_id}
            )
        except Exception as e:
            logger.error(f"Failed to reload config: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 重新加载失败: {str(e)}"
            )
class StatusCommandHandler(CommandHandler):
    """Handle ``/status`` - report the agent's current state."""
    async def handle(self, ctx: CommandContext) -> CommandResult:
        """Assemble a human-readable status summary for the agent."""
        try:
            agent = ctx.agent
            lines = ["📊 Agent 状态", ""]
            lines.append(f"🆔 Agent ID: {ctx.agent_id}")
            lines.append(f"⚙️ Config: {ctx.config_name}")
            # Model info
            model = getattr(agent, "model", None)
            if model:
                lines.append(f"🤖 Model: {model}")
            # Memory state (message count, when the memory exposes .content)
            memory = getattr(agent, "memory", None)
            if memory:
                msg_count = len(getattr(memory, "content", []))
                lines.append(f"💾 Memory: {msg_count} messages")
            # Skills state
            from backend.agents.skills_manager import SkillsManager
            sm = SkillsManager()
            active_skills = sm.list_active_skill_metadata(ctx.config_name, ctx.agent_id)
            lines.append(f"🔧 Active Skills: {len(active_skills)}")
            # Tool-group state
            toolkit = getattr(agent, "toolkit", None)
            if toolkit:
                groups = getattr(toolkit, "tool_groups", {})
                active_groups = [name for name, g in groups.items() if getattr(g, "active", False)]
                lines.append(f"🛠️ Active Tool Groups: {', '.join(active_groups) if active_groups else 'None'}")
            return CommandResult(
                success=True,
                message="\n".join(lines),
                data={
                    "agent_id": ctx.agent_id,
                    "config_name": ctx.config_name,
                    "active_skills_count": len(active_skills)
                }
            )
        except Exception as e:
            logger.error(f"Failed to get status: {e}")
            return CommandResult(
                success=False,
                message=f"❌ 获取状态失败: {str(e)}"
            )
class HelpCommandHandler(CommandHandler):
    """Handle ``/help`` - print the command reference."""
    async def handle(self, ctx: CommandContext) -> CommandResult:
        # User-facing output; intentionally left in Chinese.
        help_text = """📖 EvoAgent 命令帮助
可用命令:
/save <message> - 保存内容到 MEMORY.md
/compact - 压缩记忆
/skills list - 列出已激活技能
/skills enable <name> - 启用技能
/skills disable <name>- 禁用技能
/skills install <name>- 安装技能
/reload - 重新加载配置
/status - 显示Agent状态
/help - 显示此帮助信息
提示:
• 所有命令以 / 开头
• 命令不区分大小写
• 使用 Tab 键可自动补全命令
"""
        return CommandResult(success=True, message=help_text)
class AgentCommandDispatcher:
    """Agent command dispatcher.

    Modeled on CoPaw's CommandHandler design; provides the single entry point
    for EvoAgent system-command handling.
    """
    # Supported top-level system commands.
    SYSTEM_COMMANDS = frozenset({
        "save", "compact",
        "skills", "reload",
        "status", "help"
    })
    def __init__(self):
        # command name -> handler
        self._handlers: Dict[str, CommandHandler] = {}
        # command name -> {subcommand name -> handler}
        self._subcommands: Dict[str, Dict[str, CommandHandler]] = {}
        self._register_default_handlers()
    def _register_default_handlers(self) -> None:
        """Register the built-in command handlers."""
        self._handlers["save"] = SaveCommandHandler()
        self._handlers["compact"] = CompactCommandHandler()
        self._handlers["reload"] = ReloadCommandHandler()
        self._handlers["status"] = StatusCommandHandler()
        self._handlers["help"] = HelpCommandHandler()
        # Subcommands: /skills list/enable/disable/install
        self._subcommands["skills"] = {
            "list": SkillsListCommandHandler(),
            "enable": SkillsEnableCommandHandler(),
            "disable": SkillsDisableCommandHandler(),
            "install": SkillsInstallCommandHandler(),
        }
    def is_command(self, query: str | None) -> bool:
        """Check whether *query* is a system command.

        Args:
            query: Raw user input string.
        Returns:
            True if the input is a recognised system command.
        """
        if not isinstance(query, str) or not query.startswith("/"):
            return False
        # NOTE: lstrip("/") strips *all* leading slashes, so "//help" is also
        # accepted as a command.
        parts = query.strip().lstrip("/").split()
        if not parts:
            return False
        cmd = parts[0].lower()
        # Match the primary command name (case-insensitively).
        if cmd in self.SYSTEM_COMMANDS:
            return True
        return False
    async def handle(self, agent: "EvoAgent", query: str) -> CommandResult:
        """Dispatch *query* to the matching handler.

        Args:
            agent: The EvoAgent instance the command acts on.
            query: Command string (e.g. "/skills list").
        Returns:
            The command execution result.
        """
        if not self.is_command(query):
            return CommandResult(
                success=False,
                message=f"未知命令: {query}\n使用 /help 查看可用命令。"
            )
        # Split into the command word and the remaining argument string.
        parts = query.strip().lstrip("/").split(maxsplit=1)
        cmd = parts[0].lower()
        args = parts[1] if len(parts) > 1 else ""
        logger.info(f"Processing command: {cmd}, args: {args}")
        # Subcommand dispatch (e.g. /skills list)
        if cmd in self._subcommands:
            sub_parts = args.split(maxsplit=1)
            sub_cmd = sub_parts[0].lower() if sub_parts else ""
            sub_args = sub_parts[1] if len(sub_parts) > 1 else ""
            handlers = self._subcommands[cmd]
            handler = handlers.get(sub_cmd)
            if handler is None:
                available = ", ".join(handlers.keys())
                return CommandResult(
                    success=False,
                    message=f"未知子命令: {sub_cmd}\n可用子命令: {available}"
                )
            ctx = CommandContext(agent, query, sub_args)
            return await handler.handle(ctx)
        # Primary command dispatch
        handler = self._handlers.get(cmd)
        if handler is None:
            return CommandResult(
                success=False,
                message=f"命令未实现: {cmd}"
            )
        ctx = CommandContext(agent, query, args)
        return await handler.handle(ctx)
# Convenience factory
def create_command_dispatcher() -> AgentCommandDispatcher:
    """Create a dispatcher pre-populated with the default handlers."""
    return AgentCommandDispatcher()

View File

@@ -0,0 +1,452 @@
# -*- coding: utf-8 -*-
"""Evaluation hooks system for skills.
Provides evaluation metric collection and storage for skill performance tracking.
Based on the evaluation hooks design in SKILL_TEMPLATE.md.
"""
from __future__ import annotations
import json
import logging
from dataclasses import dataclass, field, asdict
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
logger = logging.getLogger(__name__)
class MetricType(Enum):
    """Types of evaluation metrics."""
    HIT_RATE = "hit_rate"  # signal hit rate
    RISK_VIOLATION = "risk_violation"  # risk-control violation rate
    POSITION_DEVIATION = "position_deviation"  # position deviation rate
    # NOTE(review): mixed-case member name kept for backward compatibility.
    PnL_ATTRIBUTION = "pnl_attribution"  # P&L attribution consistency
    SIGNAL_CONSISTENCY = "signal_consistency"  # signal consistency
    DECISION_LATENCY = "decision_latency"  # decision latency
    TOOL_USAGE = "tool_usage"  # tool usage rate
    CUSTOM = "custom"  # user-defined metric
@dataclass
class EvaluationMetric:
    """A single evaluation metric."""
    name: str  # metric identifier
    metric_type: MetricType  # category of the metric
    value: float  # observed value
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())  # capture time (local, ISO-8601)
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extra context
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (enum flattened to its value)."""
        return {
            "name": self.name,
            "metric_type": self.metric_type.value,
            "value": self.value,
            "timestamp": self.timestamp,
            "metadata": self.metadata,
        }
@dataclass
class EvaluationResult:
    """Evaluation result for a skill execution."""
    skill_name: str  # skill that was executed
    run_id: str  # run this execution belongs to
    agent_id: str  # agent that executed the skill
    metrics: List[EvaluationMetric] = field(default_factory=list)  # collected metrics
    inputs: Dict[str, Any] = field(default_factory=dict)  # skill input parameters
    outputs: Dict[str, Any] = field(default_factory=dict)  # skill outputs
    decision: Optional[str] = None  # final decision, if one was recorded
    success: bool = True  # whether execution succeeded
    error_message: Optional[str] = None  # failure details when success is False
    started_at: Optional[str] = None  # ISO timestamp; set by EvaluationHook.start_evaluation
    # NOTE: defaults to construction time; EvaluationHook.complete_evaluation
    # overwrites it with the actual completion time.
    completed_at: Optional[str] = field(default_factory=lambda: datetime.now().isoformat())
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (metrics serialized recursively)."""
        return {
            "skill_name": self.skill_name,
            "run_id": self.run_id,
            "agent_id": self.agent_id,
            "metrics": [m.to_dict() for m in self.metrics],
            "inputs": self.inputs,
            "outputs": self.outputs,
            "decision": self.decision,
            "success": self.success,
            "error_message": self.error_message,
            "started_at": self.started_at,
            "completed_at": self.completed_at,
        }
class EvaluationHook:
    """Hook for collecting skill evaluation metrics.
    This hook collects and stores evaluation metrics after skill execution
    for later analysis and memory/reflection stages.

    Only one evaluation session is tracked at a time; calling
    start_evaluation again silently discards any in-flight session.
    """
    def __init__(
        self,
        storage_dir: Path,
        run_id: str,
        agent_id: str,
    ):
        """Initialize evaluation hook.
        Args:
            storage_dir: Directory to store evaluation results
            run_id: Current run identifier
            agent_id: Current agent identifier
        """
        self.storage_dir = Path(storage_dir)
        self.run_id = run_id
        self.agent_id = agent_id
        # In-flight session; None when no evaluation is active.
        self._current_evaluation: Optional[EvaluationResult] = None
    def start_evaluation(
        self,
        skill_name: str,
        inputs: Dict[str, Any],
    ) -> None:
        """Start a new evaluation session.
        Args:
            skill_name: Name of the skill being evaluated
            inputs: Input parameters for the skill
        """
        self._current_evaluation = EvaluationResult(
            skill_name=skill_name,
            run_id=self.run_id,
            agent_id=self.agent_id,
            inputs=inputs,
            started_at=datetime.now().isoformat(),
        )
        logger.debug(f"Started evaluation for skill: {skill_name}")
    def add_metric(
        self,
        name: str,
        metric_type: MetricType,
        value: float,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Add an evaluation metric.
        Args:
            name: Metric name
            metric_type: Type of metric
            value: Metric value
            metadata: Additional metadata
        """
        # Metrics recorded outside a session are dropped, not raised on.
        if self._current_evaluation is None:
            logger.warning("No active evaluation session, ignoring metric")
            return
        metric = EvaluationMetric(
            name=name,
            metric_type=metric_type,
            value=value,
            metadata=metadata or {},
        )
        self._current_evaluation.metrics.append(metric)
        logger.debug(f"Added metric: {name} = {value}")
    def add_metrics(self, metrics: List[EvaluationMetric]) -> None:
        """Add multiple evaluation metrics at once.
        Args:
            metrics: List of metrics to add
        """
        if self._current_evaluation is None:
            logger.warning("No active evaluation session, ignoring metrics")
            return
        self._current_evaluation.metrics.extend(metrics)
    def record_outputs(self, outputs: Dict[str, Any]) -> None:
        """Record skill outputs.
        Args:
            outputs: Output from skill execution
        """
        if self._current_evaluation is None:
            logger.warning("No active evaluation session, ignoring outputs")
            return
        # Replaces (does not merge with) any previously recorded outputs.
        self._current_evaluation.outputs = outputs
    def record_decision(self, decision: str) -> None:
        """Record the final decision.
        Args:
            decision: Final decision made by the skill
        """
        if self._current_evaluation is None:
            logger.warning("No active evaluation session, ignoring decision")
            return
        self._current_evaluation.decision = decision
    def complete_evaluation(
        self,
        success: bool = True,
        error_message: Optional[str] = None,
    ) -> Optional[EvaluationResult]:
        """Complete the evaluation session and persist results.
        Args:
            success: Whether the skill execution was successful
            error_message: Error message if failed
        Returns:
            The completed evaluation result, or None if no active evaluation
        """
        if self._current_evaluation is None:
            logger.warning("No active evaluation to complete")
            return None
        self._current_evaluation.success = success
        self._current_evaluation.error_message = error_message
        self._current_evaluation.completed_at = datetime.now().isoformat()
        # Persist to storage, then clear the session so a new one can start.
        result = self._persist_evaluation(self._current_evaluation)
        self._current_evaluation = None
        logger.debug(f"Completed evaluation for skill: {result.skill_name}")
        return result
    def _persist_evaluation(self, evaluation: EvaluationResult) -> EvaluationResult:
        """Persist evaluation result to storage.

        Best-effort: write failures are logged and swallowed, and the
        evaluation object is returned either way.
        Args:
            evaluation: Evaluation result to persist
        Returns:
            The persisted evaluation
        """
        # Create run-specific directory
        run_dir = self.storage_dir / self.run_id
        run_dir.mkdir(parents=True, exist_ok=True)
        # Create agent-specific subdirectory
        agent_dir = run_dir / self.agent_id
        agent_dir.mkdir(parents=True, exist_ok=True)
        # Generate filename with timestamp (microsecond suffix avoids
        # collisions for rapid successive evaluations of the same skill).
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        filename = f"{evaluation.skill_name}_{timestamp}.json"
        filepath = agent_dir / filename
        # Write evaluation result
        try:
            with open(filepath, "w", encoding="utf-8") as f:
                json.dump(evaluation.to_dict(), f, ensure_ascii=False, indent=2)
            logger.info(f"Persisted evaluation to: {filepath}")
        except Exception as e:
            logger.error(f"Failed to persist evaluation: {e}")
        return evaluation
    def cancel_evaluation(self) -> None:
        """Cancel the current evaluation session without saving."""
        if self._current_evaluation is not None:
            logger.debug(f"Cancelled evaluation for: {self._current_evaluation.skill_name}")
        self._current_evaluation = None
class EvaluationCollector:
    """Collector for aggregating evaluation metrics across runs.
    Provides methods to query and analyze evaluation results.
    """
    def __init__(self, storage_dir: Path):
        """Initialize evaluation collector.
        Args:
            storage_dir: Root directory containing evaluation results
        """
        self.storage_dir = Path(storage_dir)
    def get_run_evaluations(
        self,
        run_id: str,
        agent_id: Optional[str] = None,
    ) -> List[EvaluationResult]:
        """Get all evaluations for a run.
        Args:
            run_id: Run identifier
            agent_id: Optional agent identifier to filter by
        Returns:
            List of evaluation results (empty when the run directory is missing)
        """
        run_dir = self.storage_dir / run_id
        if not run_dir.exists():
            return []
        evaluations = []
        agent_dirs = [run_dir / agent_id] if agent_id else run_dir.iterdir()
        for agent_dir in agent_dirs:
            if not agent_dir.is_dir():
                continue
            for eval_file in agent_dir.glob("*.json"):
                # Unreadable/malformed files are skipped, not fatal.
                try:
                    with open(eval_file, "r", encoding="utf-8") as f:
                        data = json.load(f)
                    evaluations.append(self._parse_evaluation(data))
                except Exception as e:
                    logger.warning(f"Failed to load evaluation {eval_file}: {e}")
        return evaluations
    def get_skill_metrics(
        self,
        skill_name: str,
        run_ids: Optional[List[str]] = None,
    ) -> List[EvaluationMetric]:
        """Get all metrics for a specific skill.
        Args:
            skill_name: Name of the skill
            run_ids: Optional list of run IDs to filter by; defaults to all
                run directories found under the storage root
        Returns:
            List of metrics for the skill
        """
        metrics = []
        if run_ids is None:
            # Fix: iterdir() on a missing storage root raised
            # FileNotFoundError; treat it as "no runs recorded yet".
            if not self.storage_dir.exists():
                return []
            run_ids = [d.name for d in self.storage_dir.iterdir() if d.is_dir()]
        for run_id in run_ids:
            evaluations = self.get_run_evaluations(run_id)
            for eval_result in evaluations:
                if eval_result.skill_name == skill_name:
                    metrics.extend(eval_result.metrics)
        return metrics
    def calculate_skill_stats(
        self,
        skill_name: str,
        metric_type: MetricType,
        run_ids: Optional[List[str]] = None,
    ) -> Dict[str, float]:
        """Calculate statistics for a specific metric type.
        Args:
            skill_name: Name of the skill
            metric_type: Type of metric to calculate
            run_ids: Optional list of run IDs to filter by
        Returns:
            Dictionary with min, max, avg, count statistics
            (only {"count": 0} when no matching metrics exist)
        """
        metrics = self.get_skill_metrics(skill_name, run_ids)
        filtered = [m for m in metrics if m.metric_type == metric_type]
        if not filtered:
            return {"count": 0}
        values = [m.value for m in filtered]
        return {
            "count": len(values),
            "min": min(values),
            "max": max(values),
            "avg": sum(values) / len(values),
        }
    def _parse_evaluation(self, data: Dict[str, Any]) -> EvaluationResult:
        """Parse evaluation data into EvaluationResult.
        Args:
            data: Raw evaluation data
        Returns:
            Parsed EvaluationResult
        """
        metrics = []
        for m in data.get("metrics", []):
            metrics.append(EvaluationMetric(
                name=m["name"],
                metric_type=MetricType(m["metric_type"]),
                value=m["value"],
                timestamp=m.get("timestamp", ""),
                metadata=m.get("metadata", {}),
            ))
        return EvaluationResult(
            skill_name=data["skill_name"],
            run_id=data["run_id"],
            agent_id=data["agent_id"],
            metrics=metrics,
            inputs=data.get("inputs", {}),
            outputs=data.get("outputs", {}),
            decision=data.get("decision"),
            success=data.get("success", True),
            error_message=data.get("error_message"),
            started_at=data.get("started_at"),
            completed_at=data.get("completed_at"),
        )
def parse_evaluation_hooks(skill_dir: Path) -> Dict[str, Any]:
    """Parse evaluation hooks from SKILL.md.

    Extracts the "Optional: Evaluation hooks" section from skill
    documentation and detects which metric types it mentions.

    Args:
        skill_dir: Skill directory path

    Returns:
        Dictionary with ``supported_metrics`` (list of metric-type values)
        and ``section_content`` keys, or an empty dict when the file or
        section is missing or unreadable.
    """
    skill_md = skill_dir / "SKILL.md"
    if not skill_md.exists():
        return {}
    try:
        content = skill_md.read_text(encoding="utf-8")
        header = "## Optional: Evaluation hooks"
        if header in content:
            start = content.find(header)
            # The section runs until the next "## " heading or end of file.
            next_section = content.find("\n## ", start + 1)
            if next_section == -1:
                eval_section = content[start:]
            else:
                eval_section = content[start:next_section]
            lowered = eval_section.lower()
            # Accept both the raw enum value (e.g. "hit_rate") and its
            # space-separated prose form ("hit rate"). The previous
            # implementation matched only the prose form, so verbatim
            # metric names in SKILL.md were never detected.
            metrics = [
                metric_type.value
                for metric_type in MetricType
                if metric_type.value in lowered
                or metric_type.value.replace("_", " ") in lowered
            ]
            return {
                "supported_metrics": metrics,
                "section_content": eval_section.strip(),
            }
    except Exception as e:
        logger.warning(f"Failed to parse evaluation hooks: {e}")
    return {}
# Public re-export surface of the evaluation module.
__all__ = [
    "MetricType",
    "EvaluationMetric",
    "EvaluationResult",
    "EvaluationHook",
    "EvaluationCollector",
    "parse_evaluation_hooks",
]

View File

@@ -0,0 +1,510 @@
# -*- coding: utf-8 -*-
"""EvoAgent - Core agent implementation for 大时代.
This module provides the main EvoAgent class built on AgentScope's ReActAgent,
with integrated tools, skills, and memory management based on CoPaw design.
Key features:
- Workspace-driven configuration from Markdown files
- Dynamic skill loading from skills/active directories
- Tool-guard security interception
- Hook system for extensibility
- Runtime skill and prompt reloading
"""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Type, TYPE_CHECKING
from agentscope.agent import ReActAgent
from agentscope.memory import InMemoryMemory
from agentscope.message import Msg
from agentscope.tool import Toolkit
from .tool_guard import ToolGuardMixin
from .hooks import (
HookManager,
BootstrapHook,
MemoryCompactionHook,
WorkspaceWatchHook,
HOOK_PRE_REASONING,
)
from ..prompts.builder import (
PromptBuilder,
build_system_prompt_from_workspace,
)
from ..agent_workspace import load_agent_workspace_config
from ..skills_manager import SkillsManager
# Team infrastructure imports (graceful import - may not exist yet)
try:
from backend.agents.team.messenger import AgentMessenger
from backend.agents.team.task_delegator import TaskDelegator
TEAM_INFRA_AVAILABLE = True
except ImportError:
TEAM_INFRA_AVAILABLE = False
AgentMessenger = None
TaskDelegator = None
if TYPE_CHECKING:
from agentscope.formatter import FormatterBase
from agentscope.model import ModelWrapperBase
logger = logging.getLogger(__name__)
class EvoAgent(ToolGuardMixin, ReActAgent):
    """EvoAgent with integrated tools, skills, and memory management.

    This agent extends ReActAgent with:
    - Workspace-driven configuration from AGENTS.md/SOUL.md/PROFILE.md/etc.
    - Dynamic skill loading from skills/active directories
    - Tool-guard security interception (via ToolGuardMixin)
    - Hook system for extensibility (bootstrap, memory compaction)
    - Runtime skill and prompt reloading

    MRO note
    ~~~~~~~~
    ``ToolGuardMixin`` overrides ``_acting`` and ``_reasoning`` via
    Python's MRO: EvoAgent → ToolGuardMixin → ReActAgent.

    Example:
        agent = EvoAgent(
            agent_id="fundamentals_analyst",
            config_name="smoke_fullstack",
            workspace_dir=Path("runs/smoke_fullstack/agents/fundamentals_analyst"),
            model=model_instance,
            formatter=formatter_instance,
        )
    """

    def __init__(
        self,
        agent_id: str,
        config_name: str,
        workspace_dir: Path,
        model: "ModelWrapperBase",
        formatter: "FormatterBase",
        skills_manager: Optional[SkillsManager] = None,
        sys_prompt: Optional[str] = None,
        max_iters: int = 10,
        memory: Optional[Any] = None,
        enable_tool_guard: bool = True,
        enable_bootstrap_hook: bool = True,
        enable_memory_compaction: bool = False,
        memory_manager: Optional[Any] = None,
        memory_compact_threshold: Optional[int] = None,
        env_context: Optional[str] = None,
        prompt_files: Optional[List[str]] = None,
    ):
        """Initialize EvoAgent.

        Args:
            agent_id: Unique identifier for this agent
            config_name: Run configuration name (e.g., "smoke_fullstack")
            workspace_dir: Agent workspace directory containing markdown files
            model: LLM model instance
            formatter: Message formatter instance
            skills_manager: Optional SkillsManager instance
            sys_prompt: Optional override for system prompt
            max_iters: Maximum reasoning-acting iterations
            memory: Optional memory instance (defaults to InMemoryMemory)
            enable_tool_guard: Enable tool-guard security interception
            enable_bootstrap_hook: Enable bootstrap guidance on first interaction
            enable_memory_compaction: Enable automatic memory compaction
            memory_manager: Optional memory manager for compaction
            memory_compact_threshold: Token threshold for memory compaction
            env_context: Optional environment context to prepend to system prompt
            prompt_files: List of markdown files to load (defaults to standard set)
        """
        self.agent_id = agent_id
        self.config_name = config_name
        self.workspace_dir = Path(workspace_dir)
        self._skills_manager = skills_manager or SkillsManager()
        self._env_context = env_context
        self._prompt_files = prompt_files
        # Arm the tool guard before the parent class is initialized, so the
        # mixin's _acting/_reasoning overrides have their state in place.
        if enable_tool_guard:
            self._init_tool_guard()
        # Load agent configuration from workspace (agent.yaml, may be absent)
        self._agent_config = self._load_agent_config()
        # Build or use provided system prompt
        if sys_prompt is not None:
            self._sys_prompt = sys_prompt
        else:
            self._sys_prompt = self._build_system_prompt()
        # Create toolkit with skills
        toolkit = self._create_toolkit()
        # Initialize hook manager
        self._hook_manager = HookManager()
        # Initialize parent ReActAgent
        super().__init__(
            name=agent_id,
            model=model,
            sys_prompt=self._sys_prompt,
            toolkit=toolkit,
            memory=memory or InMemoryMemory(),
            formatter=formatter,
            max_iters=max_iters,
        )
        # Register hooks (must happen after super().__init__ so the hooks
        # can see the fully-constructed agent state).
        self._register_hooks(
            enable_bootstrap=enable_bootstrap_hook,
            enable_memory_compaction=enable_memory_compaction,
            memory_manager=memory_manager,
            memory_compact_threshold=memory_compact_threshold,
        )
        # Initialize team infrastructure if available (optional dependency;
        # TEAM_INFRA_AVAILABLE is set by the guarded import at module top).
        self._messenger: Optional["AgentMessenger"] = None
        self._task_delegator: Optional["TaskDelegator"] = None
        if TEAM_INFRA_AVAILABLE:
            self._init_team_infrastructure()
        logger.info(
            "EvoAgent initialized: %s (workspace: %s)",
            agent_id,
            workspace_dir,
        )

    def _load_agent_config(self) -> Dict[str, Any]:
        """Load agent configuration from the workspace's agent.yaml.

        Returns:
            Agent configuration dictionary; empty when agent.yaml is absent.
        """
        config_path = self.workspace_dir / "agent.yaml"
        if config_path.exists():
            loaded = load_agent_workspace_config(config_path)
            return dict(loaded.values)
        return {}

    def _build_system_prompt(self) -> str:
        """Build system prompt from workspace markdown files.

        Uses PromptBuilder to load and combine AGENTS.md, SOUL.md,
        PROFILE.md, and other configured files.

        Returns:
            Complete system prompt string
        """
        prompt = build_system_prompt_from_workspace(
            workspace_dir=self.workspace_dir,
            enabled_files=self._prompt_files,
            agent_id=self.agent_id,
            extra_context=self._env_context,
        )
        return prompt

    def _create_toolkit(self) -> Toolkit:
        """Create and populate toolkit with agent skills.

        Loads skills from the agent's active skills directory and
        registers them with the toolkit.

        NOTE(review): keep the Toolkit configuration here in sync with
        reload_skills(), which duplicates it.

        Returns:
            Configured Toolkit instance
        """
        toolkit = Toolkit(
            agent_skill_instruction=(
                "<system-info>You have access to specialized skills. "
                "Each skill lives in a directory and is described by SKILL.md. "
                "Follow the skill instructions when they are relevant to the current task."
                "</system-info>"
            ),
            agent_skill_template="- {name} (dir: {dir}): {description}",
        )
        # Register skills from the run/agent-scoped active directory; a skill
        # is any subdirectory that contains a SKILL.md.
        active_skills_dir = self._skills_manager.get_agent_active_root(
            self.config_name,
            self.agent_id,
        )
        if active_skills_dir.exists():
            for skill_dir in sorted(active_skills_dir.iterdir()):
                if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                    try:
                        toolkit.register_agent_skill(str(skill_dir))
                        logger.debug("Registered skill: %s", skill_dir.name)
                    except Exception as e:
                        # A broken skill should not prevent agent startup.
                        logger.error(
                            "Failed to register skill '%s': %s",
                            skill_dir.name,
                            e,
                        )
        return toolkit

    def _register_hooks(
        self,
        enable_bootstrap: bool,
        enable_memory_compaction: bool,
        memory_manager: Optional[Any],
        memory_compact_threshold: Optional[int],
    ) -> None:
        """Register agent hooks.

        Args:
            enable_bootstrap: Enable bootstrap hook
            enable_memory_compaction: Enable memory compaction hook
            memory_manager: Memory manager instance
            memory_compact_threshold: Token threshold for compaction
        """
        # Bootstrap hook - checks BOOTSTRAP.md on first interaction
        if enable_bootstrap:
            bootstrap_hook = BootstrapHook(
                workspace_dir=self.workspace_dir,
                language="zh",
            )
            self._hook_manager.register(
                hook_type=HOOK_PRE_REASONING,
                hook_name="bootstrap",
                hook=bootstrap_hook,
            )
            logger.debug("Registered bootstrap hook")
        # Memory compaction hook - requires a memory manager to do the
        # actual summarization.
        if enable_memory_compaction and memory_manager is not None:
            compaction_hook = MemoryCompactionHook(
                memory_manager=memory_manager,
                memory_compact_threshold=memory_compact_threshold,
            )
            self._hook_manager.register(
                hook_type=HOOK_PRE_REASONING,
                hook_name="memory_compaction",
                hook=compaction_hook,
            )
            logger.debug("Registered memory compaction hook")
        # Workspace watch hook - always registered; auto-reloads the system
        # prompt when workspace markdown files change.
        workspace_watch_hook = WorkspaceWatchHook(
            workspace_dir=self.workspace_dir,
        )
        self._hook_manager.register(
            hook_type=HOOK_PRE_REASONING,
            hook_name="workspace_watch",
            hook=workspace_watch_hook,
        )
        logger.debug("Registered workspace watch hook")

    async def _reasoning(self, **kwargs) -> Msg:
        """Override reasoning to execute pre-reasoning hooks.

        Args:
            **kwargs: Arguments for reasoning

        Returns:
            Response message
        """
        # Execute pre-reasoning hooks; hooks may return modified kwargs.
        kwargs = await self._hook_manager.execute(
            hook_type=HOOK_PRE_REASONING,
            agent=self,
            kwargs=kwargs,
        )
        # Call parent (which may be ToolGuardMixin's _reasoning per the MRO)
        return await super()._reasoning(**kwargs)

    def reload_skills(self, active_skill_dirs: Optional[List[Path]] = None) -> None:
        """Reload skills at runtime.

        Rebuilds the toolkit with current skills from the active directory.

        Args:
            active_skill_dirs: Optional list of specific skill directories to load
        """
        logger.info("Reloading skills for agent: %s", self.agent_id)
        # Create new toolkit
        # NOTE(review): this Toolkit configuration duplicates _create_toolkit();
        # keep the two in sync.
        new_toolkit = Toolkit(
            agent_skill_instruction=(
                "<system-info>You have access to specialized skills. "
                "Each skill lives in a directory and is described by SKILL.md. "
                "Follow the skill instructions when they are relevant to the current task."
                "</system-info>"
            ),
            agent_skill_template="- {name} (dir: {dir}): {description}",
        )
        # Register skills: either the explicit list passed by the caller,
        # or everything currently present under the active skills root.
        if active_skill_dirs is None:
            active_skills_dir = self._skills_manager.get_agent_active_root(
                self.config_name,
                self.agent_id,
            )
            if active_skills_dir.exists():
                active_skill_dirs = [
                    d for d in active_skills_dir.iterdir()
                    if d.is_dir() and (d / "SKILL.md").exists()
                ]
            else:
                active_skill_dirs = []
        for skill_dir in active_skill_dirs:
            if skill_dir.exists() and (skill_dir / "SKILL.md").exists():
                try:
                    new_toolkit.register_agent_skill(str(skill_dir))
                    logger.debug("Reloaded skill: %s", skill_dir.name)
                except Exception as e:
                    logger.error(
                        "Failed to reload skill '%s': %s",
                        skill_dir.name,
                        e,
                    )
        # Replace toolkit wholesale (drops any previously registered skills
        # that are no longer active).
        self.toolkit = new_toolkit
        logger.info("Skills reloaded for agent: %s", self.agent_id)

    def rebuild_sys_prompt(self) -> None:
        """Rebuild and replace the system prompt at runtime.

        Useful after updating AGENTS.md, SOUL.md, PROFILE.md, etc.
        to ensure the prompt reflects the latest configuration.

        Updates both self._sys_prompt and the first system-role
        message stored in self.memory.content.
        """
        logger.info("Rebuilding system prompt for agent: %s", self.agent_id)
        # Reload agent config in case it changed
        self._agent_config = self._load_agent_config()
        # Rebuild prompt
        self._sys_prompt = self._build_system_prompt()
        # Update memory if system message exists
        # NOTE(review): assumes memory.content yields (msg, mark) pairs as in
        # InMemoryMemory — confirm for custom memory implementations.
        if hasattr(self, "memory") and self.memory.content:
            for msg, _marks in self.memory.content:
                if getattr(msg, "role", None) == "system":
                    msg.content = self._sys_prompt
                    logger.debug("Updated system message in memory")
                    break
        logger.info("System prompt rebuilt for agent: %s", self.agent_id)

    async def reply(
        self,
        msg: Msg | List[Msg] | None = None,
        structured_model: Optional[Type[Any]] = None,
    ) -> Msg:
        """Process a message and return a response.

        Args:
            msg: Input message(s) from user
            structured_model: Optional pydantic model for structured output

        Returns:
            Response message
        """
        # Handle list of messages: all but the last go straight into memory,
        # and only the last one drives the parent's reply loop.
        if isinstance(msg, list):
            # Process each message in sequence
            for m in msg[:-1]:
                await self.memory.add(m)
            msg = msg[-1] if msg else None
        return await super().reply(msg=msg, structured_model=structured_model)

    def get_agent_info(self) -> Dict[str, Any]:
        """Get agent information.

        Returns:
            Dictionary with agent metadata
        """
        return {
            "agent_id": self.agent_id,
            "config_name": self.config_name,
            "workspace_dir": str(self.workspace_dir),
            "skills_count": len([
                s for s in self._skills_manager.list_active_skill_metadata(
                    self.config_name,
                    self.agent_id,
                )
            ]),
            "registered_hooks": self._hook_manager.list_hooks(),
            "team_infra_available": TEAM_INFRA_AVAILABLE,
        }

    def _init_team_infrastructure(self) -> None:
        """Initialize team infrastructure components (messenger and task delegator).

        This method initializes the AgentMessenger for inter-agent communication
        and the TaskDelegator for subagent delegation. Failure is non-fatal:
        both attributes are reset to None and a warning is logged.
        """
        if not TEAM_INFRA_AVAILABLE:
            return
        try:
            self._messenger = AgentMessenger(agent_id=self.agent_id)
            self._task_delegator = TaskDelegator(agent=self)
            logger.debug(
                "Team infrastructure initialized for agent: %s",
                self.agent_id,
            )
        except Exception as e:
            logger.warning(
                "Failed to initialize team infrastructure for %s: %s",
                self.agent_id,
                e,
            )
            self._messenger = None
            self._task_delegator = None

    @property
    def messenger(self) -> Optional["AgentMessenger"]:
        """Get the agent's messenger for inter-agent communication.

        Returns:
            AgentMessenger instance if available, None otherwise
        """
        return self._messenger

    async def delegate_task(
        self,
        task_type: str,
        task_data: Dict[str, Any],
        target_agent: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Delegate a task to a subagent using the TaskDelegator.

        Args:
            task_type: Type of task to delegate
            task_data: Data/payload for the task
            target_agent: Optional specific agent ID to delegate to

        Returns:
            Dict containing the delegation result; on failure a dict with
            ``success=False`` and an ``error`` message (never raises).
        """
        if not TEAM_INFRA_AVAILABLE or self._task_delegator is None:
            return {
                "success": False,
                "error": "Team infrastructure not available",
            }
        try:
            return await self._task_delegator.delegate_task(
                task_type=task_type,
                task_data=task_data,
                target_agent=target_agent,
            )
        except Exception as e:
            logger.error(
                "Task delegation failed for %s: %s",
                self.agent_id,
                e,
            )
            return {"success": False, "error": str(e)}
# Public API of this module.
__all__ = ["EvoAgent"]

View File

@@ -0,0 +1,613 @@
# -*- coding: utf-8 -*-
"""Hook system for EvoAgent.
Provides pre_reasoning and post_acting hooks with built-in implementations:
- BootstrapHook: First-time setup guidance
- MemoryCompactionHook: Automatic memory compression
Based on CoPaw's hooks design.
"""
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from agentscope.agent import ReActAgent
logger = logging.getLogger(__name__)
# Hook types: lifecycle event identifiers used as registry keys by
# HookManager. Plain strings (type alias only, no runtime enforcement).
HookType = str
HOOK_PRE_REASONING: HookType = "pre_reasoning"
HOOK_POST_ACTING: HookType = "post_acting"
class Hook(ABC):
    """Abstract base class for agent hooks.

    Concrete hooks implement the async ``__call__`` protocol and are
    registered with a :class:`HookManager` under a lifecycle event.
    """

    @abstractmethod
    async def __call__(
        self,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Run the hook against the agent.

        Args:
            agent: The agent instance
            kwargs: Input arguments to the method being hooked

        Returns:
            Modified kwargs, or None to keep the original arguments
        """
class HookManager:
    """Manages agent hooks.

    Provides registration and execution of hooks for different
    lifecycle events in the agent's operation.

    Hook-type buckets are created lazily on first registration, so
    custom hook types work without pre-declaration. Executing, listing
    or unregistering an unknown hook type is a harmless no-op (the
    previous implementation raised ``KeyError`` for any type other than
    the two built-in ones, while ``list_hooks`` already tolerated them —
    this makes the whole API consistent).
    """

    def __init__(self) -> None:
        # hook_type -> ordered list of (name, hook) pairs.
        self._hooks: Dict[HookType, List[tuple[str, Hook]]] = {}

    def register(
        self,
        hook_type: HookType,
        hook_name: str,
        hook: Hook | Callable,
    ) -> None:
        """Register a hook.

        Re-registering under an existing name replaces the old hook and
        moves it to the end of the execution order (last-write-wins).

        Args:
            hook_type: Type of hook (pre_reasoning, post_acting, or custom)
            hook_name: Unique name for this hook
            hook: Hook instance or callable
        """
        bucket = self._hooks.setdefault(hook_type, [])
        # Remove existing hook with same name (in place, preserving order).
        bucket[:] = [(name, h) for name, h in bucket if name != hook_name]
        bucket.append((hook_name, hook))
        logger.debug("Registered hook '%s' for type '%s'", hook_name, hook_type)

    def unregister(self, hook_type: HookType, hook_name: str) -> bool:
        """Unregister a hook.

        Args:
            hook_type: Type of hook
            hook_name: Name of the hook to remove

        Returns:
            True if hook was found and removed
        """
        bucket = self._hooks.get(hook_type)
        if bucket is None:
            return False
        original_len = len(bucket)
        bucket[:] = [(name, h) for name, h in bucket if name != hook_name]
        removed = len(bucket) < original_len
        if removed:
            logger.debug("Unregistered hook '%s' from type '%s'", hook_name, hook_type)
        return removed

    async def execute(
        self,
        hook_type: HookType,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Dict[str, Any]:
        """Execute all hooks of a given type.

        A hook that raises is logged and skipped; the remaining hooks
        still run. A hook returning None leaves kwargs unchanged.

        Args:
            hook_type: Type of hooks to execute
            agent: The agent instance
            kwargs: Input arguments

        Returns:
            Potentially modified kwargs
        """
        # Iterate over a snapshot so hooks may (un)register during execution.
        for name, hook in list(self._hooks.get(hook_type, [])):
            try:
                result = await hook(agent, kwargs)
                if result is not None:
                    kwargs = result
            except Exception as e:
                logger.error("Hook '%s' failed: %s", name, e, exc_info=True)
        return kwargs

    def list_hooks(self, hook_type: Optional[HookType] = None) -> List[str]:
        """List registered hook names.

        Args:
            hook_type: Optional type to filter by

        Returns:
            List of hook names
        """
        if hook_type:
            return [name for name, _ in self._hooks.get(hook_type, [])]
        names = []
        for hooks in self._hooks.values():
            names.extend([name for name, _ in hooks])
        return names
class BootstrapHook(Hook):
    """Hook for bootstrap guidance on first user interaction.

    This hook looks for a BOOTSTRAP.md file in the working directory
    and if found, prepends guidance to the first user message to help
    establish the agent's identity and user preferences.
    """

    def __init__(
        self,
        workspace_dir: Path,
        language: str = "zh",
    ):
        """Initialize bootstrap hook.

        Args:
            workspace_dir: Working directory containing BOOTSTRAP.md
            language: Language code for bootstrap guidance (en/zh)
        """
        self.workspace_dir = Path(workspace_dir)
        self.language = language
        # Marker file that suppresses the guidance on later runs.
        self._completed_flag = self.workspace_dir / ".bootstrap_completed"

    def _is_first_user_interaction(self, agent: "ReActAgent") -> bool:
        """Check if this is the first user interaction.

        Args:
            agent: The agent instance

        Returns:
            True if first user interaction
        """
        if not hasattr(agent, "memory") or not agent.memory.content:
            return True
        # Count user messages (excluding system)
        # NOTE(review): assumes memory.content yields (msg, mark) pairs as
        # in InMemoryMemory — confirm for custom memory backends.
        user_count = sum(
            1 for msg, _ in agent.memory.content if msg.role == "user"
        )
        return user_count <= 1

    def _build_bootstrap_guidance(self) -> str:
        """Build bootstrap guidance message.

        Returns:
            Formatted bootstrap guidance (Chinese when language == "zh",
            English otherwise)
        """
        if self.language == "zh":
            return (
                "# 引导模式\n"
                "\n"
                "工作目录中存在 `BOOTSTRAP.md` — 首次设置。\n"
                "\n"
                "1. 阅读 BOOTSTRAP.md友好地表示初次见面"
                "引导用户完成设置。\n"
                "2. 按照 BOOTSTRAP.md 的指示,"
                "帮助用户定义你的身份和偏好。\n"
                "3. 按指南创建/更新必要文件"
                "PROFILE.md、MEMORY.md 等)。\n"
                "4. 完成后删除 BOOTSTRAP.md。\n"
                "\n"
                "如果用户希望跳过,直接回答下面的问题即可。\n"
                "\n"
                "---\n"
                "\n"
            )
        return (
            "# BOOTSTRAP MODE\n"
            "\n"
            "`BOOTSTRAP.md` exists — first-time setup.\n"
            "\n"
            "1. Read BOOTSTRAP.md, greet the user, "
            "and guide them through setup.\n"
            "2. Follow BOOTSTRAP.md instructions "
            "to define identity and preferences.\n"
            "3. Create/update files "
            "(PROFILE.md, MEMORY.md, etc.) as described.\n"
            "4. Delete BOOTSTRAP.md when done.\n"
            "\n"
            "If the user wants to skip, answer their "
            "question directly instead.\n"
            "\n"
            "---\n"
            "\n"
        )

    async def __call__(
        self,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Check and load BOOTSTRAP.md on first user interaction.

        Args:
            agent: The agent instance
            kwargs: Input arguments to the _reasoning method

        Returns:
            None (hook doesn't modify kwargs)
        """
        try:
            bootstrap_path = self.workspace_dir / "BOOTSTRAP.md"
            # Check if bootstrap has already been triggered
            if self._completed_flag.exists():
                return None
            if not bootstrap_path.exists():
                return None
            if not self._is_first_user_interaction(agent):
                return None
            bootstrap_guidance = self._build_bootstrap_guidance()
            logger.debug("Found BOOTSTRAP.md [%s], prepending guidance", self.language)
            # Prepend to first user message in memory (skip over the leading
            # system messages, mutate the first user Msg in place).
            if hasattr(agent, "memory") and agent.memory.content:
                system_count = sum(
                    1 for msg, _ in agent.memory.content if msg.role == "system"
                )
                for msg, _ in agent.memory.content[system_count:]:
                    if msg.role == "user":
                        # Prepend guidance to message content
                        original_content = msg.content
                        msg.content = bootstrap_guidance + original_content
                        break
                logger.debug("Bootstrap guidance prepended to first user message")
            # Create completion flag to prevent repeated triggering
            # NOTE(review): the flag is created even when no user message was
            # found, so guidance is never retried — confirm intended.
            self._completed_flag.touch()
            logger.debug("Created bootstrap completion flag")
        except Exception as e:
            logger.error("Failed to process bootstrap: %s", e, exc_info=True)
        return None
class WorkspaceWatchHook(Hook):
    """Hook that auto-reloads workspace markdown files when they change.

    Watches SOUL.md, AGENTS.md, PROFILE.md, etc. and asks the agent to
    rebuild its system prompt whenever one of them is added, removed or
    modified. Based on CoPaw's AgentConfigWatcher approach, applied to
    markdown files.
    """

    # Files to monitor (same as PromptBuilder.DEFAULT_FILES)
    WATCHED_FILES = frozenset([
        "SOUL.md", "AGENTS.md", "PROFILE.md",
        "POLICY.md", "MEMORY.md",
        "BOOTSTRAP.md",
    ])

    def __init__(
        self,
        workspace_dir: Path,
        poll_interval: float = 2.0,
    ):
        """Initialize workspace watch hook.

        Args:
            workspace_dir: Workspace directory to monitor
            poll_interval: How often to check for changes (seconds)
        """
        self.workspace_dir = Path(workspace_dir)
        self.poll_interval = poll_interval
        self._last_mtimes: dict[str, float] = {}
        self._initialized = False

    def _scan_mtimes(self) -> dict[str, float]:
        """Return the current mtime of every watched file that exists."""
        candidates = (
            (name, self.workspace_dir / name) for name in self.WATCHED_FILES
        )
        return {
            name: path.stat().st_mtime
            for name, path in candidates
            if path.exists()
        }

    def _has_changes(self) -> bool:
        """Report whether any watched file changed since the last check."""
        snapshot = self._scan_mtimes()
        if not self._initialized:
            # First scan only establishes the baseline.
            self._initialized = True
            self._last_mtimes = snapshot
            return False
        # Dict equality covers added, removed and modified files alike.
        if snapshot == self._last_mtimes:
            return False
        self._last_mtimes = snapshot
        return True

    async def __call__(
        self,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Check for file changes and rebuild the prompt if needed.

        Args:
            agent: The agent instance
            kwargs: Input arguments (unused)

        Returns:
            None
        """
        try:
            if not self._has_changes():
                return None
            agent_label = getattr(agent, "agent_id", "unknown")
            logger.info(
                "Workspace files changed, triggering prompt rebuild for: %s",
                agent_label,
            )
            if hasattr(agent, "rebuild_sys_prompt"):
                agent.rebuild_sys_prompt()
            else:
                logger.warning(
                    "Agent %s has no rebuild_sys_prompt method",
                    agent_label,
                )
        except Exception as e:
            logger.error("Workspace watch hook failed: %s", e, exc_info=True)
        return None
class MemoryCompactionHook(Hook):
    """Hook for automatic memory compaction when context is full.

    This hook monitors the (estimated) token count of messages and triggers
    compaction when it exceeds the threshold. It preserves the system prompt
    and recent messages while summarizing older conversation history.

    Based on CoPaw's memory compaction design with additional improvements:
    - memory_compact_ratio: Ratio to compact when threshold reached
    - memory_reserve_ratio: Always keep a reserve of tokens for recent messages
    - enable_tool_result_compact: Compact tool results separately
    - tool_result_compact_keep_n: Number of tool results to keep
    """

    def __init__(
        self,
        memory_manager: Any,
        memory_compact_threshold: Optional[int] = None,
        memory_compact_ratio: float = 0.75,
        memory_reserve_ratio: float = 0.1,
        enable_tool_result_compact: bool = False,
        tool_result_compact_keep_n: int = 5,
    ):
        """Initialize memory compaction hook.

        Args:
            memory_manager: Memory manager instance for compaction
            memory_compact_threshold: Token threshold for compaction;
                None disables the hook entirely
            memory_compact_ratio: Target ratio to compact to (e.g., 0.75 = compact to 75%)
            memory_reserve_ratio: Reserve ratio to always keep free (e.g., 0.1 = 10%)
            enable_tool_result_compact: Enable tool result compaction
            tool_result_compact_keep_n: Number of tool results to keep
        """
        self.memory_manager = memory_manager
        self.memory_compact_threshold = memory_compact_threshold
        self.memory_compact_ratio = memory_compact_ratio
        self.memory_reserve_ratio = memory_reserve_ratio
        self.enable_tool_result_compact = enable_tool_result_compact
        self.tool_result_compact_keep_n = tool_result_compact_keep_n

    async def __call__(
        self,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Pre-reasoning hook to check and compact memory if needed.

        Args:
            agent: The agent instance
            kwargs: Input arguments to the _reasoning method

        Returns:
            None (hook doesn't modify kwargs)
        """
        try:
            if not hasattr(agent, "memory") or not self.memory_manager:
                return None
            # Disabled unless an explicit threshold was configured; check
            # before fetching memory so the disabled path is free.
            if self.memory_compact_threshold is None:
                return None
            memory = agent.memory
            # Get current token count estimate
            messages = await memory.get_memory()
            total_tokens = self._estimate_tokens(messages)
            if total_tokens < self.memory_compact_threshold:
                return None
            logger.info(
                "Memory compaction triggered: %d tokens (threshold: %d)",
                total_tokens,
                self.memory_compact_threshold,
            )
            # Compact memory
            await self._compact_memory(agent, messages)
        except Exception as e:
            logger.error("Failed to compact memory: %s", e, exc_info=True)
        return None

    def _estimate_tokens(self, messages: List[Any]) -> int:
        """Estimate token count for messages.

        Uses the common ~4-characters-per-token heuristic; good enough
        for threshold checks, not for exact accounting.

        Args:
            messages: List of messages

        Returns:
            Estimated token count
        """
        total_chars = sum(
            len(str(getattr(msg, "content", "")))
            for msg in messages
        )
        return total_chars // 4

    async def _compact_memory(
        self,
        agent: "ReActAgent",
        messages: List[Any],
    ) -> None:
        """Compact memory by summarizing older messages.

        Uses CoPaw-style memory management:
        - memory_compact_ratio: Target ratio to compact to (e.g., 0.75 means compact to 75%)
        - memory_reserve_ratio: Always keep this ratio free (e.g., 0.1 means keep 10% for recent)

        Args:
            agent: The agent instance
            messages: Current messages in memory, oldest first
        """
        if self.memory_compact_threshold is None:
            return
        # Estimate total tokens
        total_tokens = self._estimate_tokens(messages)
        # Calculate reserve based on ratio (CoPaw-style)
        reserve_tokens = int(total_tokens * self.memory_reserve_ratio)
        # Calculate target tokens after compaction
        target_tokens = int(total_tokens * self.memory_compact_ratio)
        # NOTE(review): with the default ratios (0.75 / 0.1) this max()
        # always selects total - reserve (90%), so memory_compact_ratio only
        # matters when it exceeds 1 - memory_reserve_ratio — confirm intended.
        target_tokens = max(target_tokens, total_tokens - reserve_tokens)
        # Walk from newest to oldest, keeping recent messages while the
        # budget allows; everything older is queued for compaction.
        # (Bug fix: the previous implementation walked oldest-first, which
        # kept the OLDEST messages and summarized the newest — the opposite
        # of the documented "preserve recent, summarize older" behavior.)
        messages_to_compact = []
        kept_tokens = 0
        for msg in reversed(messages):
            msg_tokens = self._estimate_tokens([msg])
            if kept_tokens + msg_tokens > target_tokens:
                messages_to_compact.append(msg)
            else:
                kept_tokens += msg_tokens
        if not messages_to_compact:
            return
        # Restore chronological order for the summarizer.
        messages_to_compact.reverse()
        logger.info(
            "Compacting %d messages (%d tokens) to target %d tokens",
            len(messages_to_compact),
            self._estimate_tokens(messages_to_compact),
            target_tokens,
        )
        # Use memory manager to compact if available
        if hasattr(self.memory_manager, "compact_memory"):
            try:
                summary = await self.memory_manager.compact_memory(
                    messages=messages_to_compact,
                )
                logger.info(
                    "Memory compacted: %d messages summarized, summary: %s",
                    len(messages_to_compact),
                    summary[:200] if summary else "N/A",
                )
                # Mark messages as compressed if supported
                if hasattr(agent.memory, "update_messages_mark"):
                    from agentscope.agent._react_agent import _MemoryMark
                    await agent.memory.update_messages_mark(
                        new_mark=_MemoryMark.COMPRESSED,
                        msg_ids=[msg.id for msg in messages_to_compact],
                    )
            except Exception as e:
                logger.error("Memory manager compaction failed: %s", e)
        # Tool result compaction (CoPaw-style)
        if self.enable_tool_result_compact:
            await self._compact_tool_results(agent, messages)

    async def _compact_tool_results(
        self,
        agent: "ReActAgent",
        messages: List[Any],
    ) -> None:
        """Compact tool results by keeping only recent ones.

        Based on CoPaw's tool_result_compact_keep_n pattern.
        Tool results can be very verbose, so we keep only the N most recent ones.

        Args:
            agent: The agent instance
            messages: Current messages in memory (unused; memory.content
                is consulted directly)
        """
        if not hasattr(agent.memory, "content"):
            return
        # Find tool result messages (usually have "tool" role)
        # NOTE(review): assumes memory.content yields (msg, mark) pairs —
        # confirm for custom memory implementations.
        tool_results = []
        for msg, _ in agent.memory.content:
            if hasattr(msg, "role") and msg.role == "tool":
                tool_results.append(msg)
        if len(tool_results) <= self.tool_result_compact_keep_n:
            return
        # Keep only the most recent N tool results
        excess_results = tool_results[:-self.tool_result_compact_keep_n]
        logger.info(
            "Tool result compaction: %d tool results found, keeping %d, compacting %d",
            len(tool_results),
            self.tool_result_compact_keep_n,
            len(excess_results),
        )
        # Mark excess tool results as compressed if supported
        if hasattr(agent.memory, "update_messages_mark"):
            from agentscope.agent._react_agent import _MemoryMark
            await agent.memory.update_messages_mark(
                new_mark=_MemoryMark.COMPRESSED,
                msg_ids=[msg.id for msg in excess_results],
            )
# Public re-export surface of the hooks module.
__all__ = [
    "Hook",
    "HookManager",
    "HookType",
    "HOOK_PRE_REASONING",
    "HOOK_POST_ACTING",
    "BootstrapHook",
    "MemoryCompactionHook",
    "WorkspaceWatchHook",
]

View File

@@ -0,0 +1,489 @@
# -*- coding: utf-8 -*-
"""Skill adaptation hook for automatic evaluation-to-iteration闭环.
Monitors evaluation metrics against configurable thresholds and triggers
automatic skill reload or logs warnings when thresholds are breached.
"""
from __future__ import annotations
import json
import logging
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
from .evaluation_hook import (
EvaluationCollector,
EvaluationResult,
MetricType,
)
logger = logging.getLogger(__name__)
class AdaptationAction(Enum):
    """What to do when a metric threshold is breached."""

    RELOAD = "reload"  # automatically reload the skill
    WARN = "warn"      # log a warning for human review
    BOTH = "both"      # reload the skill and log a warning
    NONE = "none"      # take no action
@dataclass
class AdaptationThreshold:
    """Threshold configuration for a single metric."""

    metric_type: MetricType
    # Comparison operator: lt (less than), gt (greater than), lte, gte, eq.
    operator: str = "lt"
    value: float = 0.0
    # Sliding-window size used for moving averages.
    window_size: int = 10
    # Minimum number of samples before the check may fire.
    min_samples: int = 5
    action: AdaptationAction = AdaptationAction.WARN
    # Seconds to wait after a trigger before firing again.
    cooldown_seconds: int = 300

    def evaluate(self, current_value: float) -> bool:
        """Return True when current_value breaches this threshold."""
        if self.operator == "lt":
            return current_value < self.value
        if self.operator == "lte":
            return current_value <= self.value
        if self.operator == "gt":
            return current_value > self.value
        if self.operator == "gte":
            return current_value >= self.value
        if self.operator == "eq":
            return current_value == self.value
        logger.warning(f"Unknown operator: {self.operator}")
        return False

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this threshold to a JSON-friendly dict."""
        return {
            "metric_type": self.metric_type.value,
            "operator": self.operator,
            "value": self.value,
            "window_size": self.window_size,
            "min_samples": self.min_samples,
            "action": self.action.value,
            "cooldown_seconds": self.cooldown_seconds,
        }
@dataclass
class AdaptationEvent:
    """Record of an adaptation trigger event."""

    timestamp: str
    skill_name: str
    metric_type: MetricType
    threshold: AdaptationThreshold
    current_value: float
    avg_value: float
    action_taken: AdaptationAction
    details: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this event to a JSON-friendly dict."""
        payload = {
            "timestamp": self.timestamp,
            "skill_name": self.skill_name,
            "metric_type": self.metric_type.value,
            "threshold": self.threshold.to_dict(),
            "current_value": self.current_value,
            "avg_value": self.avg_value,
            "action_taken": self.action_taken.value,
            "details": self.details,
        }
        return payload
class SkillAdaptationHook:
    """Hook for monitoring evaluation metrics and triggering skill adaptation.
    This hook wraps EvaluationHook to add threshold-based adaptation logic.
    When metrics breach configured thresholds, it can:
    - Automatically reload skills via SkillsManager
    - Log warnings for human review
    - Both
    """
    # Default thresholds for common metrics.
    # NOTE: this is a class-level list; __init__ copies it (see below) so
    # per-instance add/remove/update operations never mutate these defaults.
    DEFAULT_THRESHOLDS: List[AdaptationThreshold] = [
        AdaptationThreshold(
            metric_type=MetricType.HIT_RATE,
            operator="lt",
            value=0.5,
            action=AdaptationAction.WARN,
            cooldown_seconds=600,
        ),
        AdaptationThreshold(
            metric_type=MetricType.RISK_VIOLATION,
            operator="gt",
            value=0.1,
            action=AdaptationAction.WARN,
            cooldown_seconds=300,
        ),
        AdaptationThreshold(
            metric_type=MetricType.DECISION_LATENCY,
            operator="gt",
            value=5000,  # 5 seconds
            action=AdaptationAction.WARN,
            cooldown_seconds=300,
        ),
    ]
    def __init__(
        self,
        storage_dir: Path,
        run_id: str,
        agent_id: str,
        thresholds: Optional[List[AdaptationThreshold]] = None,
        collector: Optional[EvaluationCollector] = None,
    ):
        """Initialize skill adaptation hook.
        Args:
            storage_dir: Directory to store adaptation events
            run_id: Current run identifier
            agent_id: Current agent identifier
            thresholds: Custom threshold configurations (uses defaults if None)
            collector: Optional EvaluationCollector for historical data
        """
        self.storage_dir = Path(storage_dir)
        self.run_id = run_id
        self.agent_id = agent_id
        # BUGFIX: copy the threshold list. Previously every hook created
        # without explicit thresholds aliased the class-level
        # DEFAULT_THRESHOLDS list, so add_threshold()/update_threshold()
        # mutated the shared defaults for all other hooks in the process.
        self.thresholds = list(thresholds or self.DEFAULT_THRESHOLDS)
        self.collector = collector or EvaluationCollector(storage_dir)
        # Track cooldowns to prevent rapid re-triggering
        self._cooldowns: Dict[str, datetime] = {}
        # Store recent metrics in memory for quick access
        self._recent_metrics: Dict[str, List[float]] = {}
        # Pending adaptation events
        self._pending_events: List[AdaptationEvent] = []
    def check_threshold(
        self,
        skill_name: str,
        metric_type: MetricType,
        current_value: float,
    ) -> Optional[AdaptationEvent]:
        """Check if a metric breaches any threshold.
        Args:
            skill_name: Name of the skill
            metric_type: Type of metric
            current_value: Current metric value
        Returns:
            AdaptationEvent if threshold breached, None otherwise
        """
        # Find applicable thresholds
        applicable_thresholds = [
            t for t in self.thresholds
            if t.metric_type == metric_type
        ]
        if not applicable_thresholds:
            return None
        # Cooldowns are keyed per skill+metric (not per threshold), so one
        # trigger silences all thresholds on the same metric.
        cooldown_key = f"{skill_name}:{metric_type.value}"
        now = datetime.now()
        last_trigger = self._cooldowns.get(cooldown_key)
        # Store current value first so the moving average includes it.
        self._store_metric(cooldown_key, current_value)
        for threshold in applicable_thresholds:
            if last_trigger:
                elapsed = (now - last_trigger).total_seconds()
                if elapsed < threshold.cooldown_seconds:
                    continue
            # Evaluate threshold
            if threshold.evaluate(current_value):
                # Calculate moving average
                avg_value = self._calculate_avg(skill_name, metric_type, current_value)
                # Check minimum samples (allow immediate trigger if min_samples <= 1)
                sample_count = len(self._recent_metrics.get(cooldown_key, []))
                if threshold.min_samples > 1 and sample_count < threshold.min_samples:
                    # Not enough samples yet
                    continue
                # Trigger adaptation
                event = AdaptationEvent(
                    timestamp=now.isoformat(),
                    skill_name=skill_name,
                    metric_type=metric_type,
                    threshold=threshold,
                    current_value=current_value,
                    avg_value=avg_value,
                    action_taken=threshold.action,
                    details={
                        "run_id": self.run_id,
                        "agent_id": self.agent_id,
                    },
                )
                # Update cooldown
                self._cooldowns[cooldown_key] = now
                # Persist event
                self._persist_event(event)
                logger.info(
                    f"Threshold breached for {skill_name}.{metric_type.value}: "
                    f"current={current_value}, avg={avg_value}, action={threshold.action.value}"
                )
                return event
        return None
    def _calculate_avg(
        self,
        skill_name: str,
        metric_type: MetricType,
        current_value: float,
    ) -> float:
        """Calculate moving average for a metric (includes the current value,
        which check_threshold stores before calling this)."""
        key = f"{skill_name}:{metric_type.value}"
        values = self._recent_metrics.get(key, [])
        if not values:
            return current_value
        return sum(values) / len(values)
    def _store_metric(self, key: str, value: float) -> None:
        """Store metric value with sliding window.

        NOTE: the window is a fixed cap of 100 values;
        AdaptationThreshold.window_size is currently not consulted here.
        """
        if key not in self._recent_metrics:
            self._recent_metrics[key] = []
        self._recent_metrics[key].append(value)
        # Keep only last 100 values
        if len(self._recent_metrics[key]) > 100:
            self._recent_metrics[key] = self._recent_metrics[key][-100:]
    def _persist_event(self, event: AdaptationEvent) -> None:
        """Persist adaptation event to storage (best-effort) and queue it
        in the in-memory pending list."""
        run_dir = self.storage_dir / self.run_id / "adaptations"
        run_dir.mkdir(parents=True, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        filename = f"{event.skill_name}_{event.metric_type.value}_{timestamp}.json"
        filepath = run_dir / filename
        try:
            with open(filepath, "w", encoding="utf-8") as f:
                json.dump(event.to_dict(), f, ensure_ascii=False, indent=2)
            logger.debug(f"Persisted adaptation event to: {filepath}")
        except Exception as e:
            # Disk failures must not break the evaluation loop.
            logger.error(f"Failed to persist adaptation event: {e}")
        # Also add to pending list
        self._pending_events.append(event)
    def get_pending_warnings(self) -> List[AdaptationEvent]:
        """Get all pending warning events that need human review."""
        return [
            e for e in self._pending_events
            if e.action_taken in (AdaptationAction.WARN, AdaptationAction.BOTH)
        ]
    def clear_pending_warnings(self) -> None:
        """Clear pending warnings after they have been reviewed."""
        self._pending_events = [
            e for e in self._pending_events
            if e.action_taken == AdaptationAction.RELOAD
        ]
    def get_recent_events(
        self,
        skill_name: Optional[str] = None,
        metric_type: Optional[MetricType] = None,
        limit: int = 50,
    ) -> List[AdaptationEvent]:
        """Get recent adaptation events from disk.
        Args:
            skill_name: Optional filter by skill name
            metric_type: Optional filter by metric type
            limit: Maximum number of events to return
        Returns:
            List of recent adaptation events
        """
        events_dir = self.storage_dir / self.run_id / "adaptations"
        if not events_dir.exists():
            return []
        events = []
        # Filenames start with skill name + timestamp, so reverse sort is
        # roughly newest-first; limit is applied before filtering.
        for eval_file in sorted(events_dir.glob("*.json"), reverse=True)[:limit]:
            try:
                with open(eval_file, "r", encoding="utf-8") as f:
                    data = json.load(f)
                event = self._parse_event(data)
                if skill_name and event.skill_name != skill_name:
                    continue
                if metric_type and event.metric_type != metric_type:
                    continue
                events.append(event)
            except Exception as e:
                logger.warning(f"Failed to load adaptation event {eval_file}: {e}")
        return events
    def _parse_event(self, data: Dict[str, Any]) -> AdaptationEvent:
        """Parse adaptation event from JSON data."""
        threshold_data = data.get("threshold", {})
        metric_type = MetricType(threshold_data.get("metric_type", "custom"))
        threshold = AdaptationThreshold(
            metric_type=metric_type,
            operator=threshold_data.get("operator", "lt"),
            value=threshold_data.get("value", 0.0),
            window_size=threshold_data.get("window_size", 10),
            min_samples=threshold_data.get("min_samples", 5),
            action=AdaptationAction(threshold_data.get("action", "warn")),
            cooldown_seconds=threshold_data.get("cooldown_seconds", 300),
        )
        return AdaptationEvent(
            timestamp=data.get("timestamp", ""),
            skill_name=data.get("skill_name", ""),
            metric_type=metric_type,
            threshold=threshold,
            current_value=data.get("current_value", 0.0),
            avg_value=data.get("avg_value", 0.0),
            action_taken=AdaptationAction(data.get("action_taken", "warn")),
            details=data.get("details", {}),
        )
    def add_threshold(self, threshold: AdaptationThreshold) -> None:
        """Add a new threshold configuration (instance-local)."""
        self.thresholds.append(threshold)
    def remove_threshold(self, metric_type: MetricType) -> None:
        """Remove all thresholds for a specific metric type."""
        self.thresholds = [
            t for t in self.thresholds
            if t.metric_type != metric_type
        ]
    def update_threshold(
        self,
        metric_type: MetricType,
        **kwargs,
    ) -> None:
        """Update threshold configuration for a metric type.

        Unknown keyword names are silently ignored.
        """
        for threshold in self.thresholds:
            if threshold.metric_type == metric_type:
                for key, value in kwargs.items():
                    if hasattr(threshold, key):
                        setattr(threshold, key, value)
    def get_thresholds(self) -> List[AdaptationThreshold]:
        """Get a snapshot copy of the current threshold configurations."""
        return list(self.thresholds)
    def is_in_cooldown(self, skill_name: str, metric_type: MetricType) -> bool:
        """Check if a skill/metric combination is in cooldown period."""
        key = f"{skill_name}:{metric_type.value}"
        last_trigger = self._cooldowns.get(key)
        if not last_trigger:
            return False
        # Find the threshold for this metric type
        for threshold in self.thresholds:
            if threshold.metric_type == metric_type:
                elapsed = (datetime.now() - last_trigger).total_seconds()
                return elapsed < threshold.cooldown_seconds
        return False
class AdaptationManager:
    """Coordinates skill adaptation hooks across multiple agents.

    Keeps one SkillAdaptationHook per (run_id, agent_id) pair and offers
    aggregate views over their pending warnings and recorded events.
    """
    def __init__(self, storage_dir: Path):
        """Create a manager rooted at *storage_dir*."""
        self.storage_dir = Path(storage_dir)
        # Hook cache keyed by "run_id:agent_id".
        self._hooks: Dict[str, SkillAdaptationHook] = {}
    def get_hook(
        self,
        run_id: str,
        agent_id: str,
        thresholds: Optional[List[AdaptationThreshold]] = None,
    ) -> SkillAdaptationHook:
        """Return the hook for (run_id, agent_id), creating it on first use.
        Args:
            run_id: Run identifier
            agent_id: Agent identifier
            thresholds: Optional custom thresholds (used only on creation)
        Returns:
            SkillAdaptationHook instance
        """
        hook_key = f"{run_id}:{agent_id}"
        hook = self._hooks.get(hook_key)
        if hook is None:
            hook = SkillAdaptationHook(
                storage_dir=self.storage_dir,
                run_id=run_id,
                agent_id=agent_id,
                thresholds=thresholds,
            )
            self._hooks[hook_key] = hook
        return hook
    def get_all_pending_warnings(self) -> List[AdaptationEvent]:
        """Collect pending warnings across every managed hook."""
        return [
            warning
            for hook in self._hooks.values()
            for warning in hook.get_pending_warnings()
        ]
    def get_run_adaptations(self, run_id: str) -> List[AdaptationEvent]:
        """Collect all adaptation events recorded for *run_id*."""
        collected: List[AdaptationEvent] = []
        for hook in self._hooks.values():
            if hook.run_id == run_id:
                collected.extend(hook.get_recent_events())
        return collected
# Global manager instance (lazily created on first get_adaptation_manager call)
_adaptation_manager: Optional[AdaptationManager] = None
def get_adaptation_manager(storage_dir: Optional[Path] = None) -> AdaptationManager:
    """Return the process-wide AdaptationManager, creating it on first use.
    Args:
        storage_dir: Storage directory; required only on the first call
    Returns:
        AdaptationManager instance
    Raises:
        ValueError: If no manager exists yet and storage_dir was not given
    """
    global _adaptation_manager
    if _adaptation_manager is not None:
        return _adaptation_manager
    if storage_dir is None:
        raise ValueError("storage_dir required on first initialization")
    _adaptation_manager = AdaptationManager(storage_dir)
    return _adaptation_manager
__all__ = [
"AdaptationAction",
"AdaptationThreshold",
"AdaptationEvent",
"SkillAdaptationHook",
"AdaptationManager",
"get_adaptation_manager",
]

View File

@@ -0,0 +1,684 @@
# -*- coding: utf-8 -*-
"""ToolGuardMixin - Security interception for dangerous tool calls.
Provides ``_acting`` and ``_reasoning`` overrides that intercept
sensitive tool calls before execution, implementing the deny /
guard / approve flow.
Based on CoPaw's tool_guard_mixin.py design.
"""
from __future__ import annotations
import asyncio
import json
import logging
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set
from agentscope.message import Msg
from backend.runtime.manager import get_global_runtime_manager
logger = logging.getLogger(__name__)
class SeverityLevel(str, Enum):
    """Risk severity level attached to a guard finding."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class ApprovalStatus(str, Enum):
    """Approval lifecycle state of a guarded tool call."""
    PENDING = "pending"    # waiting for a human/external decision
    APPROVED = "approved"
    DENIED = "denied"
    EXPIRED = "expired"    # no decision arrived before the timeout
class ToolFindingRecord:
    """Internal representation of a single guard finding."""
    def __init__(self, severity: SeverityLevel, message: str, field: Optional[str] = None) -> None:
        """Capture a finding with its severity, text, and optional field name."""
        self.severity = severity
        self.message = message
        self.field = field
    def to_dict(self) -> Dict[str, Any]:
        """Serialize for API/storage payloads."""
        payload: Dict[str, Any] = {"severity": self.severity.value}
        payload["message"] = self.message
        payload["field"] = self.field
        return payload
class ApprovalRecord:
    """Mutable state of a single approval request."""
    def __init__(
        self,
        approval_id: str,
        tool_name: str,
        tool_input: Dict[str, Any],
        agent_id: str,
        workspace_id: str,
        session_id: Optional[str] = None,
        findings: Optional[List[ToolFindingRecord]] = None,
    ) -> None:
        """Create a record that starts in the PENDING state."""
        # Identity of the request.
        self.approval_id = approval_id
        self.tool_name = tool_name
        self.tool_input = tool_input
        # Who asked, and where.
        self.agent_id = agent_id
        self.workspace_id = workspace_id
        self.session_id = session_id
        # Lifecycle state.
        self.status = ApprovalStatus.PENDING
        self.findings = findings or []
        # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12;
        # consider datetime.now(timezone.utc) if consumers tolerate the offset.
        self.created_at = datetime.utcnow()
        self.resolved_at: Optional[datetime] = None
        self.resolved_by: Optional[str] = None
        self.metadata: Dict[str, Any] = {}
        # Live request object used to wake a waiting agent, if any.
        self.pending_request: "ToolApprovalRequest" | None = None
    def to_dict(self) -> Dict[str, Any]:
        """Serialize for API responses."""
        resolved_iso = self.resolved_at.isoformat() if self.resolved_at else None
        return {
            "approval_id": self.approval_id,
            "status": self.status.value,
            "tool_name": self.tool_name,
            "tool_input": self.tool_input,
            "agent_id": self.agent_id,
            "workspace_id": self.workspace_id,
            "session_id": self.session_id,
            "findings": [finding.to_dict() for finding in self.findings],
            "created_at": self.created_at.isoformat(),
            "resolved_at": resolved_iso,
            "resolved_by": self.resolved_by,
        }
class ToolGuardStore:
    """Simple in-memory approval store for development/testing."""
    def __init__(self) -> None:
        self._records: Dict[str, ApprovalRecord] = {}
        self._counter = 0
    def next_id(self) -> str:
        """Return the next sequential id, e.g. ``approval_000001``."""
        self._counter += 1
        return f"approval_{self._counter:06d}"
    def list(
        self,
        status: ApprovalStatus | None = None,
        workspace_id: Optional[str] = None,
        agent_id: Optional[str] = None,
    ) -> Iterable[ApprovalRecord]:
        """Yield records matching every filter that was provided."""
        for record in self._records.values():
            keep = (
                (not status or record.status == status)
                and (not workspace_id or record.workspace_id == workspace_id)
                and (not agent_id or record.agent_id == agent_id)
            )
            if keep:
                yield record
    def get(self, approval_id: str) -> Optional[ApprovalRecord]:
        """Look up a record by id; None when unknown."""
        return self._records.get(approval_id)
    def create_pending(
        self,
        tool_name: str,
        tool_input: Dict[str, Any],
        agent_id: str,
        workspace_id: str,
        session_id: Optional[str] = None,
        findings: Optional[List[ToolFindingRecord]] = None,
    ) -> ApprovalRecord:
        """Create, register, and return a new PENDING record."""
        new_record = ApprovalRecord(
            approval_id=self.next_id(),
            tool_name=tool_name,
            tool_input=tool_input,
            agent_id=agent_id,
            workspace_id=workspace_id,
            session_id=session_id,
            findings=findings,
        )
        self._records[new_record.approval_id] = new_record
        return new_record
    def set_status(
        self,
        approval_id: str,
        status: ApprovalStatus,
        resolved_by: Optional[str] = None,
        notify_request: bool = True,
    ) -> ApprovalRecord:
        """Transition a record and optionally wake its waiting request.

        A no-op transition (same status) leaves resolution metadata intact.
        Raises KeyError for an unknown approval_id.
        """
        record = self._records[approval_id]
        if record.status == status:
            return record
        record.status = status
        record.resolved_at = datetime.utcnow()
        record.resolved_by = resolved_by
        waiting = record.pending_request if notify_request else None
        if waiting:
            if status == ApprovalStatus.APPROVED:
                waiting.approve()
            elif status == ApprovalStatus.DENIED:
                waiting.deny()
        return record
    def cancel(self, approval_id: str) -> None:
        """Drop a record entirely; unknown ids are ignored."""
        self._records.pop(approval_id, None)
# Process-wide singleton store shared by every ToolGuardMixin instance.
TOOL_GUARD_STORE = ToolGuardStore()
def get_tool_guard_store() -> ToolGuardStore:
    """Return the module-level ToolGuardStore singleton."""
    return TOOL_GUARD_STORE
# Default tools that require approval before execution
DEFAULT_GUARDED_TOOLS: Set[str] = {
    "execute_shell_command",
    "write_file",
    "edit_file",
    "place_order",
    "modify_position",
    "delete_file",
}
# Default denied tools (cannot be approved). Denial is checked before the
# guarded set in ToolGuardMixin._acting, so a tool present in both sets is
# auto-denied without an approval round-trip.
DEFAULT_DENIED_TOOLS: Set[str] = {
    "execute_shell_command",  # Shell execution is dangerous
}
# Mark attached to denied tool-result messages so they can be found and
# cleaned up from memory later.
TOOL_GUARD_DENIED_MARK = "tool_guard_denied"
def default_findings_for_tool(tool_name: str) -> List[ToolFindingRecord]:
    """Return the built-in findings attached to an approval for *tool_name*.

    NOTE(review): this set ("execute_trade"/"modify_portfolio") does not
    overlap DEFAULT_GUARDED_TOOLS ("place_order"/"modify_position") —
    confirm which tool names are canonical.
    """
    portfolio_tools = {"execute_trade", "modify_portfolio"}
    if tool_name not in portfolio_tools:
        return []
    finding = ToolFindingRecord(
        severity=SeverityLevel.HIGH,
        message=f"Tool '{tool_name}' touches portfolio state",
    )
    return [finding]
class ToolApprovalRequest:
    """Represents a pending tool approval request awaiting a decision."""
    def __init__(
        self,
        approval_id: str,
        tool_name: str,
        tool_input: Dict[str, Any],
        tool_call_id: str,
        session_id: Optional[str] = None,
    ):
        self.approval_id = approval_id
        self.tool_name = tool_name
        self.tool_input = tool_input
        self.tool_call_id = tool_call_id
        self.session_id = session_id
        # None = undecided; True/False once approve()/deny() fires.
        self.approved: Optional[bool] = None
        self._event = asyncio.Event()
    async def wait_for_approval(self, timeout: Optional[float] = None) -> bool:
        """Wait for an approval decision.
        Args:
            timeout: Maximum time to wait in seconds
        Returns:
            True if approved; False when denied or timed out
        """
        try:
            await asyncio.wait_for(self._event.wait(), timeout=timeout)
        except asyncio.TimeoutError:
            # No decision arrived in time — treat as a denial.
            return False
        return self.approved is True
    def _resolve(self, decision: bool) -> None:
        """Record the decision and wake any waiter."""
        self.approved = decision
        self._event.set()
    def approve(self) -> None:
        """Approve this request."""
        self._resolve(True)
    def deny(self) -> None:
        """Deny this request."""
        self._resolve(False)
class ToolGuardMixin:
    """Mixin that adds tool-guard interception to a ReActAgent.
    At runtime this class is combined with ReActAgent via MRO,
    so ``super()._acting`` and ``super()._reasoning`` resolve to
    the concrete agent methods.
    Usage:
        class MyAgent(ToolGuardMixin, ReActAgent):
            def __init__(self, ...):
                super().__init__(...)
                self._init_tool_guard()
    """
    def _init_tool_guard(
        self,
        guarded_tools: Optional[Set[str]] = None,
        denied_tools: Optional[Set[str]] = None,
        approval_timeout: float = 300.0,
    ) -> None:
        """Initialize tool guard.
        Args:
            guarded_tools: Set of tool names requiring approval
            denied_tools: Set of tool names that are always denied
            approval_timeout: Timeout for approval requests in seconds
        """
        # Defaults are copied so per-agent edits never mutate the module-level
        # sets; caller-supplied sets are used as-is.
        self._guarded_tools = guarded_tools or DEFAULT_GUARDED_TOOLS.copy()
        self._denied_tools = denied_tools or DEFAULT_DENIED_TOOLS.copy()
        self._approval_timeout = approval_timeout
        # At most one approval can be in flight per agent instance.
        self._pending_approval: Optional[ToolApprovalRequest] = None
        self._approval_callback: Optional[Callable[[ToolApprovalRequest], None]] = None
        self._approval_lock = asyncio.Lock()
    def set_approval_callback(
        self,
        callback: Callable[[ToolApprovalRequest], None],
    ) -> None:
        """Set callback for approval requests.
        Args:
            callback: Function called when approval is needed
        """
        self._approval_callback = callback
    def _is_tool_guarded(self, tool_name: str) -> bool:
        """Check if a tool requires approval.
        Args:
            tool_name: Name of the tool
        Returns:
            True if tool requires approval
        """
        return tool_name in self._guarded_tools
    def _is_tool_denied(self, tool_name: str) -> bool:
        """Check if a tool is always denied.
        Args:
            tool_name: Name of the tool
        Returns:
            True if tool is denied
        """
        return tool_name in self._denied_tools
    def _last_tool_response_is_denied(self) -> bool:
        """Check if the last message in memory is a guard-denied tool result."""
        if not hasattr(self, "memory") or not self.memory.content:
            return False
        # memory.content entries are (message, marks) pairs — see
        # _acting_with_denial which adds TOOL_GUARD_DENIED_MARK.
        msg, marks = self.memory.content[-1]
        return TOOL_GUARD_DENIED_MARK in marks and msg.role == "system"
    async def _cleanup_tool_guard_denied_messages(
        self,
        include_denial_response: bool = True,
    ) -> None:
        """Remove tool-guard denied messages from memory.
        Args:
            include_denial_response: Also remove the assistant's denial explanation
        """
        if not hasattr(self, "memory"):
            return
        ids_to_delete: list[str] = []
        last_marked_idx = -1
        for i, (msg, marks) in enumerate(self.memory.content):
            if TOOL_GUARD_DENIED_MARK in marks:
                ids_to_delete.append(msg.id)
                last_marked_idx = i
        # The assistant message immediately following the last denied tool
        # result is its denial explanation; optionally delete it too.
        if (
            include_denial_response
            and last_marked_idx >= 0
            and last_marked_idx + 1 < len(self.memory.content)
        ):
            next_msg, _ = self.memory.content[last_marked_idx + 1]
            if next_msg.role == "assistant":
                ids_to_delete.append(next_msg.id)
        if ids_to_delete:
            removed = await self.memory.delete(ids_to_delete)
            logger.info("Tool guard: cleaned up %d denied message(s)", removed)
    async def _request_guard_approval(
        self,
        tool_name: str,
        tool_input: Dict[str, Any],
        tool_call_id: str,
    ) -> bool:
        """Request approval for a guarded tool call.
        This method creates a ToolApprovalRequest and waits for
        external approval via approve_guard_call() or deny_guard_call().
        Args:
            tool_name: Name of the tool
            tool_input: Tool input parameters
            tool_call_id: ID of the tool call
        Returns:
            True if approved, False otherwise
        """
        # Phase 1 (locked): create the store record, register it with the
        # runtime manager, and publish self._pending_approval.
        async with self._approval_lock:
            record = TOOL_GUARD_STORE.create_pending(
                tool_name=tool_name,
                tool_input=tool_input,
                agent_id=getattr(self, "agent_id", "unknown"),
                workspace_id=getattr(self, "workspace_id", "default"),
                session_id=getattr(self, "session_id", None),
                findings=default_findings_for_tool(tool_name),
            )
            manager = get_global_runtime_manager()
            if manager:
                manager.register_pending_approval(
                    record.approval_id,
                    {
                        "tool_name": record.tool_name,
                        "agent_id": record.agent_id,
                        "workspace_id": record.workspace_id,
                        "session_id": record.session_id,
                        "tool_input": record.tool_input,
                    },
                )
            self._pending_approval = ToolApprovalRequest(
                approval_id=record.approval_id,
                tool_name=tool_name,
                tool_input=tool_input,
                tool_call_id=tool_call_id,
                session_id=getattr(self, "session_id", None),
            )
            # Link back so ToolGuardStore.set_status can wake the waiter.
            record.pending_request = self._pending_approval
            # Notify via callback if set
            if self._approval_callback:
                self._approval_callback(self._pending_approval)
        # Wait for approval (lock is released during wait, re-acquired after)
        approval_request = self._pending_approval
        # Wait for approval outside the lock to allow concurrent approval
        approved = await approval_request.wait_for_approval(
            timeout=self._approval_timeout
        )
        # Phase 2 (locked): reconcile the store/manager state with the
        # decision (or lack of one → EXPIRED) and clear the pending slot.
        async with self._approval_lock:
            if approval_request:
                status = (
                    ApprovalStatus.APPROVED
                    if approval_request.approved is True
                    else ApprovalStatus.DENIED
                    if approval_request.approved is False
                    else ApprovalStatus.EXPIRED
                )
                TOOL_GUARD_STORE.set_status(
                    approval_request.approval_id,
                    status,
                    resolved_by="agent",
                    notify_request=False,
                )
                manager = get_global_runtime_manager()
                if manager:
                    manager.resolve_pending_approval(
                        approval_request.approval_id,
                        resolved_by="agent",
                        status=status.value,
                    )
                # Only clear if this is still the same request
                if self._pending_approval is approval_request:
                    self._pending_approval = None
        return approved
    async def approve_guard_call(self, request_id: Optional[str] = None) -> bool:
        """Approve a pending guard request.
        This method is called externally to approve a tool call
        that is waiting for approval.
        Args:
            request_id: Optional request ID to verify (not yet implemented)
        Returns:
            True if a request was approved, False if no pending request
        """
        async with self._approval_lock:
            if self._pending_approval is None:
                logger.warning("No pending approval request to approve")
                return False
            # notify_request=False: we wake the waiter directly below.
            TOOL_GUARD_STORE.set_status(
                self._pending_approval.approval_id,
                ApprovalStatus.APPROVED,
                resolved_by="agent",
                notify_request=False,
            )
            manager = get_global_runtime_manager()
            if manager:
                manager.resolve_pending_approval(
                    self._pending_approval.approval_id,
                    resolved_by="agent",
                    status=ApprovalStatus.APPROVED.value,
                )
            self._pending_approval.approve()
            logger.info("Approved tool call: %s", self._pending_approval.tool_name)
            return True
    async def deny_guard_call(self, request_id: Optional[str] = None) -> bool:
        """Deny a pending guard request.
        This method is called externally to deny a tool call
        that is waiting for approval.
        Args:
            request_id: Optional request ID to verify (not yet implemented)
        Returns:
            True if a request was denied, False if no pending request
        """
        async with self._approval_lock:
            if self._pending_approval is None:
                logger.warning("No pending approval request to deny")
                return False
            # notify_request=False: we wake the waiter directly below.
            TOOL_GUARD_STORE.set_status(
                self._pending_approval.approval_id,
                ApprovalStatus.DENIED,
                resolved_by="agent",
                notify_request=False,
            )
            manager = get_global_runtime_manager()
            if manager:
                manager.resolve_pending_approval(
                    self._pending_approval.approval_id,
                    resolved_by="agent",
                    status=ApprovalStatus.DENIED.value,
                )
            self._pending_approval.deny()
            logger.info("Denied tool call: %s", self._pending_approval.tool_name)
            return True
    async def _acting(self, tool_call) -> dict | None:
        """Intercept sensitive tool calls before execution.
        1. If tool is in denied_tools, auto-deny unconditionally.
        2. Check for a one-shot pre-approval.
        3. If tool is in the guarded scope, request approval.
        4. Otherwise, delegate to parent _acting.
        Args:
            tool_call: Tool call from the model
        Returns:
            Tool result dict or None
        """
        tool_name: str = tool_call.get("name", "")
        tool_input: dict = tool_call.get("input", {})
        tool_call_id: str = tool_call.get("id", "")
        # Check if tool is denied (takes precedence over the guarded set)
        if tool_name and self._is_tool_denied(tool_name):
            logger.warning("Tool '%s' is in the denied set, auto-denying", tool_name)
            return await self._acting_auto_denied(tool_call, tool_name)
        # Check if tool is guarded
        if tool_name and self._is_tool_guarded(tool_name):
            approved = await self._request_guard_approval(
                tool_name=tool_name,
                tool_input=tool_input,
                tool_call_id=tool_call_id,
            )
            if not approved:
                return await self._acting_with_denial(tool_call, tool_name)
        # Call parent _acting
        return await super()._acting(tool_call)  # type: ignore[misc]
    async def _acting_auto_denied(
        self,
        tool_call: Dict[str, Any],
        tool_name: str,
    ) -> dict | None:
        """Auto-deny a tool call without offering approval.
        Args:
            tool_call: Tool call from the model
            tool_name: Name of the denied tool
        Returns:
            Denial result
        """
        from agentscope.message import ToolResultBlock
        denied_text = (
            f"⛔ **Tool Blocked / 工具已拦截**\n\n"
            f"- Tool / 工具: `{tool_name}`\n"
            f"- Reason / 原因: This tool is blocked for security reasons\n\n"
            f"This tool is blocked and cannot be approved.\n"
            f"该工具已被禁止,无法批准执行。"
        )
        tool_res_msg = Msg(
            "system",
            [
                ToolResultBlock(
                    type="tool_result",
                    id=tool_call.get("id", ""),
                    name=tool_name,
                    output=[{"type": "text", "text": denied_text}],
                ),
            ],
            "system",
        )
        await self.print(tool_res_msg, True)
        # No mark here: a hard block never enters the approval-waiting flow.
        await self.memory.add(tool_res_msg)
        return None
    async def _acting_with_denial(
        self,
        tool_call: Dict[str, Any],
        tool_name: str,
    ) -> dict | None:
        """Deny the tool call after approval was rejected.
        Args:
            tool_call: Tool call from the model
            tool_name: Name of the tool
        Returns:
            Denial result
        """
        from agentscope.message import ToolResultBlock
        params_text = json.dumps(
            tool_call.get("input", {}),
            ensure_ascii=False,
            indent=2,
        )
        denied_text = (
            f"⚠️ **Tool Call Denied / 工具调用被拒绝**\n\n"
            f"- Tool / 工具: `{tool_name}`\n"
            f"- Parameters / 参数:\n"
            f"```json\n{params_text}\n```\n\n"
            f"The tool call was denied by the user or timed out.\n"
            f"工具调用被用户拒绝或已超时。"
        )
        tool_res_msg = Msg(
            "system",
            [
                ToolResultBlock(
                    type="tool_result",
                    id=tool_call.get("id", ""),
                    name=tool_name,
                    output=[{"type": "text", "text": denied_text}],
                ),
            ],
            "system",
        )
        await self.print(tool_res_msg, True)
        # Marked so _last_tool_response_is_denied / cleanup can find it.
        # NOTE(review): a single mark string is passed here — confirm the
        # memory API accepts a scalar mark rather than an iterable.
        await self.memory.add(tool_res_msg, marks=TOOL_GUARD_DENIED_MARK)
        return None
    async def _reasoning(self, **kwargs) -> Msg:
        """Short-circuit reasoning when awaiting guard approval.
        If the last message was a guard denial, return a waiting message
        instead of continuing reasoning.
        Returns:
            Response message
        """
        if self._last_tool_response_is_denied():
            msg = Msg(
                self.name,
                "⏳ Waiting for approval / 等待审批...\n\n"
                "Type `/approve` to approve, or send any message to deny.\n"
                "输入 `/approve` 批准执行,或发送任意消息拒绝。",
                "assistant",
            )
            await self.print(msg, True)
            await self.memory.add(msg)
            return msg
        return await super()._reasoning(**kwargs)  # type: ignore[misc]
__all__ = [
"ToolGuardMixin",
"ToolApprovalRequest",
"DEFAULT_GUARDED_TOOLS",
"DEFAULT_DENIED_TOOLS",
"TOOL_GUARD_DENIED_MARK",
]

146
backend/agents/compat.py Normal file
View File

@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
"""
Compatibility Layer - Adapters for legacy to EvoAgent migration.
Provides:
- LegacyAgentAdapter: Wraps old AnalystAgent to work with new interfaces
- Migration utilities for gradual adoption
"""
from typing import Any, Dict, Optional
from agentscope.message import Msg
from .agent_core import EvoAgent
class LegacyAgentAdapter:
    """
    Adapter exposing EvoAgent-style accessors on a legacy AnalystAgent.
    Allows gradual migration by wrapping existing agents unchanged.
    """
    def __init__(self, legacy_agent: Any):
        """
        Wrap *legacy_agent* without modifying it.
        Args:
            legacy_agent: Legacy AnalystAgent instance
        """
        self._agent = legacy_agent
        fallback_id = getattr(legacy_agent, 'name', 'unknown')
        self.agent_id = getattr(legacy_agent, 'agent_id', fallback_id)
        self.analyst_type = getattr(legacy_agent, 'analyst_type_key', None)
    @property
    def name(self) -> str:
        """Wrapped agent's display name (falls back to agent_id)."""
        return getattr(self._agent, 'name', self.agent_id)
    @property
    def toolkit(self) -> Any:
        """Wrapped agent's toolkit, or None."""
        return getattr(self._agent, 'toolkit', None)
    @property
    def model(self) -> Any:
        """Wrapped agent's model, or None."""
        return getattr(self._agent, 'model', None)
    @property
    def memory(self) -> Any:
        """Wrapped agent's memory, or None."""
        return getattr(self._agent, 'memory', None)
    async def reply(self, x: Msg = None) -> Msg:
        """
        Forward *x* to the wrapped agent's reply().
        Args:
            x: Input message
        Returns:
            Response message
        """
        response = await self._agent.reply(x)
        return response
    def reload_runtime_assets(self, active_skill_dirs: Optional[list] = None) -> None:
        """
        Forward a runtime-asset reload when the wrapped agent supports it.
        Args:
            active_skill_dirs: Optional list of active skill directories
        """
        if hasattr(self._agent, 'reload_runtime_assets'):
            self._agent.reload_runtime_assets(active_skill_dirs)
    def to_evo_agent(
        self,
        workspace_manager: Optional[Any] = None,
        enable_tool_guard: bool = False,
    ) -> EvoAgent:
        """
        Build an EvoAgent carrying over this agent's configuration.
        Args:
            workspace_manager: Optional workspace manager
            enable_tool_guard: Whether to enable tool guard
        Returns:
            New EvoAgent instance with same configuration
        """
        source = self._agent
        return EvoAgent(
            agent_id=self.agent_id,
            model=self.model,
            formatter=getattr(source, 'formatter', None),
            toolkit=self.toolkit,
            workspace_manager=workspace_manager,
            config=getattr(source, 'config', {}),
            long_term_memory=getattr(source, 'long_term_memory', None),
            enable_tool_guard=enable_tool_guard,
            sys_prompt=getattr(source, '_sys_prompt', None),
        )
    def __getattr__(self, name: str) -> Any:
        """Fall through to the wrapped agent for anything not defined here."""
        return getattr(self._agent, name)
def is_legacy_agent(agent: Any) -> bool:
    """
    Return True when *agent* is a pre-EvoAgent analyst.
    Args:
        agent: Agent instance to check
    Returns:
        True if legacy agent
    """
    # Legacy analysts are identified by their analyst_type_key attribute.
    if not hasattr(agent, 'analyst_type_key'):
        return False
    return not isinstance(agent, EvoAgent)
def adapt_agent(agent: Any) -> Any:
    """
    Wrap *agent* in a LegacyAgentAdapter when it is a legacy agent.
    Args:
        agent: Agent instance
    Returns:
        Adapted agent or original if already EvoAgent
    """
    return LegacyAgentAdapter(agent) if is_legacy_agent(agent) else agent
def adapt_agents(agents: list) -> list:
"""
Wrap multiple agents in adapters.
Args:
agents: List of agent instances
Returns:
List of adapted agents
"""
return [adapt_agent(agent) for agent in agents]

332
backend/agents/factory.py Normal file
View File

@@ -0,0 +1,332 @@
# -*- coding: utf-8 -*-
"""Agent Factory - Dynamic creation and management of AgentConfigs."""
import logging
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import yaml
logger = logging.getLogger(__name__)
@dataclass
class ModelConfig:
    """Model configuration for an agent."""
    model_name: str = "gpt-4o"  # LLM identifier passed to the model client
    temperature: float = 0.7  # sampling temperature
    max_tokens: int = 4096  # cap on generated tokens per response
class AgentConfig:
    """Plain data holder describing a configured agent instance."""
    def __init__(
        self,
        agent_id: str,
        agent_type: str,
        workspace_id: str,
        config_path: Path,
        model_config: Optional[ModelConfig] = None,
    ):
        """Capture identity, on-disk location, and model settings."""
        self.agent_id = agent_id
        self.agent_type = agent_type
        self.workspace_id = workspace_id
        self.config_path = config_path
        self.model_config = model_config or ModelConfig()
        # The agent's directory is wherever its agent.yaml lives.
        self.agent_dir = config_path.parent
    def to_dict(self) -> Dict[str, Any]:
        """Serialize agent to dictionary."""
        model = self.model_config
        payload: Dict[str, Any] = {
            "agent_id": self.agent_id,
            "agent_type": self.agent_type,
            "workspace_id": self.workspace_id,
            "config_path": str(self.config_path),
            "agent_dir": str(self.agent_dir),
        }
        payload["model_config"] = {
            "model_name": model.model_name,
            "temperature": model.temperature,
            "max_tokens": model.max_tokens,
        }
        return payload
class AgentFactory:
    """Factory for creating, cloning, and managing agents.

    Agents live on disk under
    ``<workspaces_root>/<workspace_id>/agents/<agent_id>/`` with an
    ``agent.yaml`` config, a set of markdown prompt files, and a
    ``skills/`` directory tree.
    """
    def __init__(self, project_root: Optional[Path] = None):
        """Initialize the agent factory.

        Args:
            project_root: Root directory of the project; defaults to three
                levels above this file.
        """
        self.project_root = project_root or Path(__file__).parent.parent.parent
        self.workspaces_root = self.project_root / "workspaces"
        # NOTE(review): template_dir lives under backend/workspaces/ while
        # workspaces_root is directly under the project root — confirm which
        # layout is intended; template_dir is not used anywhere in this class.
        self.template_dir = self.project_root / "backend" / "workspaces" / ".template"
    def create_agent(
        self,
        agent_id: str,
        agent_type: str,
        workspace_id: str,
        model_config: Optional[ModelConfig] = None,
        clone_from: Optional[str] = None,
    ) -> AgentConfig:
        """Create a new agent on disk and return its AgentConfig.

        Args:
            agent_id: Unique identifier for the agent
            agent_type: Type of agent (e.g., "technical_analyst")
            workspace_id: ID of the workspace to create agent in
            model_config: Model configuration (defaults applied if None)
            clone_from: Path to existing agent to clone from (optional)

        Returns:
            AgentConfig instance

        Raises:
            ValueError: If agent already exists or workspace doesn't exist

        Note: if cloning/templating fails midway, the partially created
        agent directory is left behind; a retry with the same agent_id will
        then fail the exists-check above.
        """
        workspace_dir = self.workspaces_root / workspace_id
        if not workspace_dir.exists():
            raise ValueError(f"Workspace '{workspace_id}' does not exist")
        agent_dir = workspace_dir / "agents" / agent_id
        if agent_dir.exists():
            raise ValueError(f"Agent '{agent_id}' already exists in workspace '{workspace_id}'")
        # Create the skills directory structure up front.
        agent_dir.mkdir(parents=True, exist_ok=True)
        (agent_dir / "skills" / "active").mkdir(parents=True, exist_ok=True)
        (agent_dir / "skills" / "local").mkdir(parents=True, exist_ok=True)
        (agent_dir / "skills" / "installed").mkdir(parents=True, exist_ok=True)
        (agent_dir / "skills" / "disabled").mkdir(parents=True, exist_ok=True)
        # Populate content: either clone an existing agent or seed defaults.
        if clone_from:
            self._clone_agent_files(clone_from, agent_dir, agent_id)
        else:
            self._copy_template(agent_dir, agent_id, agent_type)
        # Write agent.yaml last, so its presence implies a complete layout.
        config_path = agent_dir / "agent.yaml"
        self._write_agent_yaml(config_path, agent_id, agent_type, model_config)
        return AgentConfig(
            agent_id=agent_id,
            agent_type=agent_type,
            workspace_id=workspace_id,
            config_path=config_path,
            model_config=model_config,
        )
    def delete_agent(self, agent_id: str, workspace_id: str) -> bool:
        """Delete an agent's directory (the containing workspace is kept).

        Args:
            agent_id: ID of the agent to delete
            workspace_id: ID of the workspace containing the agent

        Returns:
            True if deleted, False if agent didn't exist
        """
        agent_dir = self.workspaces_root / workspace_id / "agents" / agent_id
        if not agent_dir.exists():
            return False
        shutil.rmtree(agent_dir)
        return True
    def clone_agent(
        self,
        source_agent_id: str,
        source_workspace_id: str,
        new_agent_id: str,
        target_workspace_id: Optional[str] = None,
        model_config: Optional[ModelConfig] = None,
    ) -> AgentConfig:
        """Clone an existing agent.

        Args:
            source_agent_id: ID of the agent to clone
            source_workspace_id: Workspace containing the source agent
            new_agent_id: ID for the new agent
            target_workspace_id: Target workspace (defaults to source workspace)
            model_config: Optional new model configuration

        Returns:
            AgentConfig instance for the cloned agent

        Raises:
            ValueError: If the source agent directory does not exist.

        Note: only agent_type is carried over from the source agent.yaml;
        create_agent() rewrites the config, so source fields such as
        enabled_skills are not propagated.
        """
        target_workspace_id = target_workspace_id or source_workspace_id
        source_dir = self.workspaces_root / source_workspace_id / "agents" / source_agent_id
        if not source_dir.exists():
            raise ValueError(f"Source agent '{source_agent_id}' not found")
        # Load source agent config (tolerate a missing agent.yaml).
        source_config_path = source_dir / "agent.yaml"
        source_config = {}
        if source_config_path.exists():
            with open(source_config_path, "r", encoding="utf-8") as f:
                source_config = yaml.safe_load(f) or {}
        agent_type = source_config.get("agent_type", "generic")
        # Determine source path for cloning
        clone_from = str(source_dir)
        return self.create_agent(
            agent_id=new_agent_id,
            agent_type=agent_type,
            workspace_id=target_workspace_id,
            model_config=model_config,
            clone_from=clone_from,
        )
    def list_agents(self, workspace_id: Optional[str] = None) -> List[Dict[str, Any]]:
        """List all agents discovered on disk.

        Args:
            workspace_id: Optional workspace to filter by

        Returns:
            List of agent information dictionaries; agents without an
            agent.yaml (or with an unreadable one) are skipped.
        """
        agents = []
        if workspace_id:
            workspaces = [self.workspaces_root / workspace_id]
        else:
            if not self.workspaces_root.exists():
                return agents
            workspaces = [d for d in self.workspaces_root.iterdir() if d.is_dir()]
        for workspace in workspaces:
            agents_dir = workspace / "agents"
            if not agents_dir.exists():
                continue
            for agent_dir in agents_dir.iterdir():
                if not agent_dir.is_dir():
                    continue
                config_path = agent_dir / "agent.yaml"
                if config_path.exists():
                    try:
                        with open(config_path, "r", encoding="utf-8") as f:
                            config = yaml.safe_load(f) or {}
                        agents.append({
                            "agent_id": agent_dir.name,
                            "workspace_id": workspace.name,
                            "agent_type": config.get("agent_type", "unknown"),
                            "config_path": str(config_path),
                        })
                    except Exception as e:
                        # A broken config should not abort the whole listing.
                        logger.warning(f"Failed to load agent config {config_path}: {e}")
        return agents
    def _copy_template(
        self,
        agent_dir: Path,
        agent_id: str,
        agent_type: str,
    ) -> None:
        """Seed an agent directory with default markdown prompt files.

        Args:
            agent_dir: Target agent directory
            agent_id: ID of the agent, interpolated into the file bodies
            agent_type: Type of the agent (currently unused here —
                presumably reserved for type-specific templates; confirm)
        """
        # Create default markdown files; existing files are never overwritten.
        default_files = {
            "AGENTS.md": f"# Agent Guide\n\nDocument how {agent_id} should work, collaborate, and choose tools or skills.\n\n",
            "SOUL.md": f"# Soul\n\nDescribe {agent_id}'s temperament, reasoning posture, and voice.\n\n",
            "PROFILE.md": f"# Profile\n\nTrack {agent_id}'s long-lived investment style, preferences, and strengths.\n\n",
            "MEMORY.md": f"# Memory\n\nStore durable lessons, heuristics, and reminders for {agent_id}.\n\n",
            "POLICY.md": f"# Policy\n\nOptional run-scoped constraints, limits, or strategy policy.\n\n",
        }
        for filename, content in default_files.items():
            filepath = agent_dir / filename
            if not filepath.exists():
                filepath.write_text(content, encoding="utf-8")
    def _clone_agent_files(self, source_path: str, target_dir: Path, new_agent_id: str) -> None:
        """Clone markdown files and skill files from an existing agent.

        Args:
            source_path: Path to source agent directory
            target_dir: Target agent directory
            new_agent_id: ID for the new agent

        Raises:
            ValueError: If the source path does not exist.
        """
        source_dir = Path(source_path)
        if not source_dir.exists():
            raise ValueError(f"Source path '{source_path}' does not exist")
        # Copy markdown files, rewriting references to the old agent id.
        for md_file in source_dir.glob("*.md"):
            target_file = target_dir / md_file.name
            content = md_file.read_text(encoding="utf-8")
            # NOTE(review): plain substring replace — any incidental
            # occurrence of the source directory name is rewritten too.
            source_name = source_dir.name
            content = content.replace(source_name, new_agent_id)
            target_file.write_text(content, encoding="utf-8")
        # Copy skill files one level deep per skill state directory
        # (nested skill subdirectories are not descended into).
        for skill_subdir in ["active", "local", "installed", "disabled"]:
            source_skills = source_dir / "skills" / skill_subdir
            if source_skills.exists():
                target_skills = target_dir / "skills" / skill_subdir
                target_skills.mkdir(parents=True, exist_ok=True)
                # Copy skill files
                for skill_file in source_skills.iterdir():
                    if skill_file.is_file():
                        shutil.copy2(skill_file, target_skills / skill_file.name)
    def _write_agent_yaml(
        self,
        config_path: Path,
        agent_id: str,
        agent_type: str,
        model_config: Optional[ModelConfig] = None,
    ) -> None:
        """Write (or overwrite) the agent.yaml configuration file.

        Args:
            config_path: Path to write configuration
            agent_id: Agent ID
            agent_type: Agent type
            model_config: Optional model configuration; omitted from the
                file entirely when None
        """
        config = {
            "agent_id": agent_id,
            "agent_type": agent_type,
            "prompt_files": [
                "SOUL.md",
                "PROFILE.md",
                "AGENTS.md",
                "POLICY.md",
                "MEMORY.md",
            ],
            "enabled_skills": [],
            "disabled_skills": [],
            "active_tool_groups": [],
            "disabled_tool_groups": [],
        }
        if model_config:
            config["model"] = {
                "name": model_config.model_name,
                "temperature": model_config.temperature,
                "max_tokens": model_config.max_tokens,
            }
        with open(config_path, "w", encoding="utf-8") as f:
            yaml.safe_dump(config, f, allow_unicode=True, sort_keys=False)

View File

@@ -0,0 +1,388 @@
# -*- coding: utf-8 -*-
"""
Portfolio Manager Agent - Based on AgentScope ReActAgent
Responsible for decision-making (NOT trade execution)
"""
from pathlib import Path
from typing import Any, Dict, Optional, Callable
from agentscope.agent import ReActAgent
from agentscope.memory import InMemoryMemory, LongTermMemoryBase
from agentscope.message import Msg, TextBlock
from agentscope.tool import Toolkit, ToolResponse
from ..utils.progress import progress
from .prompt_factory import build_agent_system_prompt, clear_prompt_factory_cache
from .team_pipeline_config import update_active_analysts
from ..config.constants import ANALYST_TYPES
class PMAgent(ReActAgent):
    """
    Portfolio Manager Agent - Makes investment decisions

    Key features:
    1. PM outputs decisions only (action + quantity per ticker)
    2. Trade execution happens externally (in pipeline/executor)
    3. Supports both backtest and live modes
    """
    def __init__(
        self,
        name: str = "portfolio_manager",
        model: Any = None,
        formatter: Any = None,
        initial_cash: float = 100000.0,
        margin_requirement: float = 0.25,
        config: Optional[Dict[str, Any]] = None,
        long_term_memory: Optional[LongTermMemoryBase] = None,
        toolkit_factory: Any = None,
        toolkit_factory_kwargs: Optional[Dict[str, Any]] = None,
        toolkit: Optional[Toolkit] = None,
    ):
        """Initialize the PM agent, its toolkit, and its system prompt.

        Args:
            name: Agent name used for prompts, progress, and tool ownership.
            model: LLM handle forwarded to ReActAgent.
            formatter: Response formatter forwarded to ReActAgent.
            initial_cash: Starting cash balance of the portfolio.
            margin_requirement: Margin requirement stored in portfolio state.
            config: Free-form config dict; "config_name" is read from it.
            long_term_memory: Optional long-term memory backend.
            toolkit_factory: Optional callable that builds the toolkit.
            toolkit_factory_kwargs: Extra kwargs passed to toolkit_factory.
            toolkit: Pre-built toolkit; skips factory/default creation.
        """
        # NOTE(review): attributes are installed via object.__setattr__
        # before super().__init__ runs — presumably ReActAgent intercepts
        # normal attribute assignment until it is initialized; confirm
        # against the agentscope base class.
        object.__setattr__(self, "config", config or {})
        # Portfolio state
        object.__setattr__(
            self,
            "portfolio",
            {
                "cash": initial_cash,
                "positions": {},
                "margin_used": 0.0,
                "margin_requirement": margin_requirement,
            },
        )
        # Decisions made in current cycle (ticker -> decision dict).
        object.__setattr__(self, "_decisions", {})
        toolkit_factory_kwargs = toolkit_factory_kwargs or {}
        object.__setattr__(self, "_toolkit_factory", toolkit_factory)
        object.__setattr__(
            self,
            "_toolkit_factory_kwargs",
            toolkit_factory_kwargs,
        )
        # Runtime team-lifecycle callbacks, injected later by the pipeline
        # through set_team_controller().
        object.__setattr__(self, "_create_team_agent_cb", None)
        object.__setattr__(self, "_remove_team_agent_cb", None)
        # Create toolkit after local state is ready so bound tool methods can be registered.
        if toolkit is None:
            if toolkit_factory is not None:
                toolkit = toolkit_factory(
                    name,
                    self.config.get("config_name", "default"),
                    owner=self,
                    **toolkit_factory_kwargs,
                )
            else:
                toolkit = self._create_toolkit()
        object.__setattr__(self, "toolkit", toolkit)
        # The system prompt is assembled from run workspace assets and the
        # toolkit's skill/tool context.
        sys_prompt = build_agent_system_prompt(
            agent_id=name,
            config_name=self.config.get("config_name", "default"),
            toolkit=self.toolkit,
        )
        kwargs = {
            "name": name,
            "sys_prompt": sys_prompt,
            "model": model,
            "formatter": formatter,
            "toolkit": toolkit,
            "memory": InMemoryMemory(),
            "max_iters": 10,
        }
        if long_term_memory:
            kwargs["long_term_memory"] = long_term_memory
            kwargs["long_term_memory_mode"] = "both"
        super().__init__(**kwargs)
    def _create_toolkit(self) -> Toolkit:
        """Create the default toolkit with only the decision-recording tool."""
        toolkit = Toolkit()
        toolkit.register_tool_function(self._make_decision)
        return toolkit
    def _make_decision(
        self,
        ticker: str,
        action: str,
        quantity: int,
        confidence: int = 50,
        reasoning: str = "",
    ) -> ToolResponse:
        """
        Record a trading decision for a ticker.

        A later call for the same ticker within one cycle overwrites the
        earlier decision.

        Args:
            ticker: Stock ticker symbol (e.g., "AAPL")
            action: Decision - "long", "short" or "hold"
            quantity: Number of shares to trade (forced to 0 for hold)
            confidence: Confidence level 0-100
            reasoning: Explanation for this decision

        Returns:
            ToolResponse confirming decision recorded (or rejecting an
            invalid action)
        """
        if action not in ["long", "short", "hold"]:
            return ToolResponse(
                content=[
                    TextBlock(
                        type="text",
                        text=f"Invalid action: {action}. "
                        "Must be 'long', 'short', or 'hold'.",
                    ),
                ],
            )
        self._decisions[ticker] = {
            "action": action,
            "quantity": quantity if action != "hold" else 0,
            "confidence": confidence,
            "reasoning": reasoning,
        }
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=f"Decision recorded: {action} "
                    f"{quantity} shares of {ticker}"
                    f" (confidence: {confidence}%)",
                ),
            ],
        )
    def _add_team_analyst(self, agent_id: str) -> ToolResponse:
        """Add one analyst to active discussion team."""
        config_name = self.config.get("config_name", "default")
        # Project root is two directories above backend/agents/.
        project_root = Path(__file__).resolve().parents[2]
        active = update_active_analysts(
            project_root=project_root,
            config_name=config_name,
            available_analysts=list(ANALYST_TYPES.keys()),
            add=[agent_id],
        )
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=(
                        f"Active analyst team updated. Added: {agent_id}. "
                        f"Current active analysts: {', '.join(active)}"
                    ),
                ),
            ],
        )
    def _remove_team_analyst(self, agent_id: str) -> ToolResponse:
        """Remove one analyst from active discussion team."""
        # Notify the pipeline first (if a callback was injected) —
        # presumably so it can tear down the live agent instance; the
        # callback's message is appended to the tool output below.
        callback_msg = ""
        callback = self._remove_team_agent_cb
        if callback is not None:
            callback_msg = callback(agent_id=agent_id)
        config_name = self.config.get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]
        active = update_active_analysts(
            project_root=project_root,
            config_name=config_name,
            available_analysts=list(ANALYST_TYPES.keys()),
            remove=[agent_id],
        )
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=(
                        f"Active analyst team updated. Removed: {agent_id}. "
                        f"Current active analysts: {', '.join(active)}"
                        + (f" | {callback_msg}" if callback_msg else "")
                    ),
                ),
            ],
        )
    def _set_active_analysts(self, agent_ids: str) -> ToolResponse:
        """Set active analysts from comma-separated agent ids.

        Args:
            agent_ids: Comma-separated ids; blanks are trimmed and
                empty entries dropped.
        """
        requested = [
            item.strip() for item in str(agent_ids or "").split(",") if item.strip()
        ]
        config_name = self.config.get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]
        active = update_active_analysts(
            project_root=project_root,
            config_name=config_name,
            available_analysts=list(ANALYST_TYPES.keys()),
            set_to=requested,
        )
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=f"Active analyst team set to: {', '.join(active)}",
                ),
            ],
        )
    def _create_team_analyst(self, agent_id: str, analyst_type: str) -> ToolResponse:
        """Create a runtime analyst instance and activate it.

        Requires the pipeline to have injected a creation callback via
        set_team_controller(); otherwise the request is rejected.
        """
        callback = self._create_team_agent_cb
        if callback is None:
            return ToolResponse(
                content=[
                    TextBlock(
                        type="text",
                        text="Runtime agent creation is not available in current pipeline.",
                    ),
                ],
            )
        result = callback(agent_id=agent_id, analyst_type=analyst_type)
        return ToolResponse(
            content=[
                TextBlock(type="text", text=result),
            ],
        )
    def set_team_controller(
        self,
        *,
        create_agent_callback: Optional[Callable[..., str]] = None,
        remove_agent_callback: Optional[Callable[..., str]] = None,
    ) -> None:
        """Inject runtime team lifecycle callbacks from pipeline."""
        object.__setattr__(self, "_create_team_agent_cb", create_agent_callback)
        object.__setattr__(self, "_remove_team_agent_cb", remove_agent_callback)
    async def reply(self, x: Msg = None) -> Msg:
        """
        Make investment decisions for one cycle.

        Args:
            x: Incoming message; None yields a stub "No input" reply.

        Returns:
            Msg with per-ticker decisions and a portfolio snapshot in
            metadata ("decisions" and "portfolio" keys)
        """
        if x is None:
            return Msg(
                name=self.name,
                content="No input provided",
                role="assistant",
            )
        # Reset the per-cycle decision buffer; _make_decision fills it
        # during the ReAct loop below.
        self._decisions = {}
        progress.update_status(
            self.name,
            None,
            "Analyzing and making decisions",
        )
        result = await super().reply(x)
        progress.update_status(self.name, None, "Completed")
        # Attach decisions to metadata
        if result.metadata is None:
            result.metadata = {}
        result.metadata["decisions"] = self._decisions.copy()
        result.metadata["portfolio"] = self.portfolio.copy()
        return result
    def get_decisions(self) -> Dict[str, Dict]:
        """Return a copy of the decisions made in the current cycle."""
        return self._decisions.copy()
    def get_portfolio_state(self) -> Dict[str, Any]:
        """Return a shallow copy of the current portfolio state."""
        return self.portfolio.copy()
    def load_portfolio_state(self, portfolio: Dict[str, Any]) -> None:
        """Load portfolio state, keeping current values for missing keys."""
        if not portfolio:
            return
        self.portfolio = {
            "cash": portfolio.get("cash", self.portfolio["cash"]),
            "positions": portfolio.get("positions", {}).copy(),
            "margin_used": portfolio.get("margin_used", 0.0),
            "margin_requirement": portfolio.get(
                "margin_requirement",
                self.portfolio["margin_requirement"],
            ),
        }
    def update_portfolio(self, portfolio: Dict[str, Any]) -> None:
        """Merge externally executed trade results into portfolio state."""
        self.portfolio.update(portfolio)
    def _has_open_positions(self) -> bool:
        """Return whether the current portfolio still has non-zero positions."""
        for position in self.portfolio.get("positions", {}).values():
            if position.get("long", 0) or position.get("short", 0):
                return True
        return False
    def can_apply_initial_cash(self) -> bool:
        """Only allow cash rebasing before any positions or margin exist."""
        return (
            not self._has_open_positions()
            and float(self.portfolio.get("margin_used", 0.0) or 0.0) == 0.0
        )
    def apply_runtime_portfolio_config(
        self,
        *,
        margin_requirement: Optional[float] = None,
        initial_cash: Optional[float] = None,
    ) -> Dict[str, bool]:
        """Apply safe run-time portfolio config updates.

        Returns:
            Mapping of field name -> whether the update was applied.
            initial_cash is silently skipped when positions/margin exist.
        """
        result = {
            "margin_requirement": False,
            "initial_cash": False,
        }
        if margin_requirement is not None:
            self.portfolio["margin_requirement"] = float(margin_requirement)
            result["margin_requirement"] = True
        if initial_cash is not None and self.can_apply_initial_cash():
            self.portfolio["cash"] = float(initial_cash)
            result["initial_cash"] = True
        return result
    def reload_runtime_assets(self, active_skill_dirs: Optional[list] = None) -> None:
        """Reload toolkit and system prompt from current run assets."""
        # Imported lazily — presumably to avoid a circular import at module
        # load time; confirm before hoisting to the top of the file.
        from .toolkit_factory import create_agent_toolkit
        clear_prompt_factory_cache()
        toolkit_factory = self._toolkit_factory or create_agent_toolkit
        toolkit_kwargs = dict(self._toolkit_factory_kwargs)
        if active_skill_dirs is not None:
            toolkit_kwargs["active_skill_dirs"] = active_skill_dirs
        self.toolkit = toolkit_factory(
            self.name,
            self.config.get("config_name", "default"),
            owner=self,
            **toolkit_kwargs,
        )
        self._apply_runtime_sys_prompt(
            build_agent_system_prompt(
                agent_id=self.name,
                config_name=self.config.get("config_name", "default"),
                toolkit=self.toolkit,
            ),
        )
    def _apply_runtime_sys_prompt(self, sys_prompt: str) -> None:
        """Update the prompt used by future turns and the cached system msg."""
        self._sys_prompt = sys_prompt
        # NOTE(review): assumes InMemoryMemory.content iterates as
        # (message, marks) pairs and that at most one system message needs
        # rewriting — confirm against the agentscope memory implementation.
        for msg, _marks in self.memory.content:
            if getattr(msg, "role", None) == "system":
                msg.content = sys_prompt
                break

View File

@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
"""Assemble system prompts from run workspace assets and toolkit context."""
from pathlib import Path
from typing import Any
from .agent_workspace import load_agent_workspace_config
from backend.config.bootstrap_config import get_bootstrap_config_for_run
from .skills_manager import SkillsManager
from .workspace_manager import RunWorkspaceManager
def _read_file_if_exists(path: Path) -> str:
if not path.exists() or not path.is_file():
return ""
return path.read_text(encoding="utf-8").strip()
def _append_section(parts: list[str], title: str, content: str) -> None:
content = content.strip()
if content:
parts.append(f"## {title}\n{content}")
def _build_skill_metadata_summary(skills_manager: SkillsManager, config_name: str, agent_id: str) -> str:
    """Create a compact summary of active skills for prompt routing."""
    items = skills_manager.list_active_skill_metadata(config_name, agent_id)
    if not items:
        return ""
    summary_lines: list[str] = [
        "You can use the following active skills. Prefer the most relevant one, "
        "then read its SKILL.md if needed for detailed workflow:",
    ]
    for item in items:
        fields = [f"- `{item.skill_name}`"]
        if item.description:
            fields.append(item.description)
        if item.version:
            fields.append(f"version: {item.version}")
        fields.append(f"path: {item.path}")
        summary_lines.append(" | ".join(fields))
    return "\n".join(summary_lines)
def build_agent_system_prompt(
    agent_id: str,
    config_name: str,
    toolkit: Any,
) -> str:
    """Build the final system prompt for an agent.

    Always reads fresh from disk — no caching.
    """
    standard_files = [
        "SOUL.md",
        "PROFILE.md",
        "AGENTS.md",
        "POLICY.md",
        "MEMORY.md",
    ]
    parts: list[str] = []
    skills_manager = SkillsManager()
    asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id)
    asset_dir.mkdir(parents=True, exist_ok=True)
    workspace_manager = RunWorkspaceManager(project_root=skills_manager.project_root)
    # Materialize the standard asset files on first use.
    if any(not (asset_dir / name).exists() for name in standard_files):
        workspace_manager.ensure_agent_assets(config_name=config_name, agent_id=agent_id)
    agent_config = load_agent_workspace_config(asset_dir / "agent.yaml")
    bootstrap_config = get_bootstrap_config_for_run(
        skills_manager.project_root,
        config_name,
    )
    _append_section(parts, "Bootstrap", bootstrap_config.prompt_body)
    prompt_files = agent_config.prompt_files or standard_files
    section_titles = {
        "SOUL.md": "Soul",
        "PROFILE.md": "Profile",
        "AGENTS.md": "Agent Guide",
        "POLICY.md": "Policy",
        "MEMORY.md": "Memory",
    }
    for name in prompt_files:
        _append_section(
            parts,
            section_titles.get(name, name),
            _read_file_if_exists(asset_dir / name),
        )
    # POLICY.md is always surfaced, even when excluded from prompt_files.
    if "POLICY.md" not in set(prompt_files):
        _append_section(parts, "Policy", _read_file_if_exists(asset_dir / "POLICY.md"))
    skill_prompt = toolkit.get_agent_skill_prompt()
    if skill_prompt:
        _append_section(parts, "Skills", str(skill_prompt))
    catalog = _build_skill_metadata_summary(
        skills_manager=skills_manager,
        config_name=config_name,
        agent_id=agent_id,
    )
    if catalog:
        _append_section(parts, "Active Skill Catalog", catalog)
    notes = toolkit.get_activated_notes()
    if notes:
        _append_section(parts, "Tool Usage Notes", str(notes))
    return "\n\n".join(part for part in parts if part.strip())
def clear_prompt_factory_cache() -> None:
    """Intentional no-op kept so runtime reload hooks remain importable."""
    return None

View File

@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
"""
Prompt Loader - Unified management and loading of Agent Prompts
Supports Markdown and YAML formats
Uses simple string replacement, does not depend on Jinja2
"""
import re
from pathlib import Path
from typing import Any, Dict, Optional
import yaml
# Singleton instance
_prompt_loader_instance: Optional["PromptLoader"] = None
def get_prompt_loader() -> "PromptLoader":
    """Return the process-wide PromptLoader, creating it on first use."""
    global _prompt_loader_instance
    if _prompt_loader_instance is not None:
        return _prompt_loader_instance
    _prompt_loader_instance = PromptLoader()
    return _prompt_loader_instance
class PromptLoader:
    """Load agent prompt templates and configs straight from disk.

    Rendering uses plain string replacement (no Jinja2); nothing is
    cached, so edits on disk take effect immediately.
    """

    def __init__(self, prompts_dir: Optional[Path] = None):
        """
        Initialize Prompt loader.

        Args:
            prompts_dir: Prompts directory path; defaults to the prompts/
                directory next to this file.
        """
        default_dir = Path(__file__).parent / "prompts"
        self.prompts_dir = default_dir if prompts_dir is None else Path(prompts_dir)

    def load_prompt(
        self,
        agent_type: str,
        prompt_name: str,
        variables: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Load and render a prompt template.

        No caching — always reads fresh from disk (CoPaw-style).

        Raises:
            FileNotFoundError: If the template file is missing.
        """
        prompt_path = self.prompts_dir / agent_type / f"{prompt_name}.md"
        if not prompt_path.exists():
            raise FileNotFoundError(
                f"Prompt file not found: {prompt_path}\n"
                f"Please create the prompt file or check the path.",
            )
        template = prompt_path.read_text(encoding="utf-8")
        if not variables:
            return template
        return self._render_template(template, variables)

    def _render_template(
        self,
        template: str,
        variables: Dict[str, Any],
    ) -> str:
        """
        Render a template via plain string replacement.

        Both ``{{ key }}`` and ``{{key}}`` spellings are substituted,
        matching the previous Jinja2-style syntax.
        """
        rendered = template
        for key, value in variables.items():
            text = str(value)
            rendered = rendered.replace(f"{{{{ {key} }}}}", text)
            rendered = rendered.replace(f"{{{{{key}}}}}", text)
        return rendered

    def _escape_json_braces(self, text: str) -> str:
        """
        Double every brace inside ```json fenced blocks so the braces
        survive later template substitution as literals.
        """
        def _escape_block(match):
            body = match.group(1).replace("{", "{{").replace("}", "}}")
            return f"```json\n{body}\n```"

        return re.sub(
            r"```json\n(.*?)\n```",
            _escape_block,
            text,
            flags=re.DOTALL,
        )

    def load_yaml_config(
        self,
        agent_type: str,
        config_name: str,
    ) -> Dict[str, Any]:
        """
        Load a YAML configuration file.

        No caching — always reads fresh from disk (CoPaw-style).

        Raises:
            FileNotFoundError: If the config file is missing.
        """
        yaml_path = self.prompts_dir / agent_type / f"{config_name}.yaml"
        if not yaml_path.exists():
            raise FileNotFoundError(f"YAML config not found: {yaml_path}")
        with open(yaml_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f) or {}

    def clear_cache(self):
        """No-op — caching removed (CoPaw-style, always fresh reads)."""
        return None

    def reload_prompt(self, agent_type: str, prompt_name: str):
        """No-op — caching removed."""
        return None

    def reload_config(self, agent_type: str, config_name: str):
        """No-op — caching removed."""
        return None

View File

@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
"""Prompt building utilities for EvoAgent.
This module provides prompt construction from workspace markdown files
with YAML frontmatter support.
"""
from .builder import (
PromptBuilder,
build_system_prompt_from_workspace,
build_bootstrap_guidance,
DEFAULT_SYS_PROMPT,
)
__all__ = [
"PromptBuilder",
"build_system_prompt_from_workspace",
"build_bootstrap_guidance",
"DEFAULT_SYS_PROMPT",
]

View File

@@ -0,0 +1,84 @@
# 分析师角色配置
fundamentals_analyst:
name: "基本面分析师"
focus:
- "公司财务健康状况和盈利能力"
- "商业模式可持续性和竞争优势"
- "管理层质量和公司治理"
- "行业地位和市场份额"
- "长期投资价值评估"
description: |
作为基本面分析师,你专注于:
- 公司财务健康状况和盈利能力
- 商业模式可持续性和竞争优势
- 管理层质量和公司治理
- 行业地位和市场份额
- 长期投资价值评估
你倾向于选择能够深入了解公司内在价值的工具,更偏好基本面和估值类工具。
technical_analyst:
name: "技术分析师"
focus:
- "价格趋势和图表形态"
- "技术指标和交易信号"
- "市场情绪和资金流向"
- "支撑/阻力位和关键价格点"
- "中短期交易机会"
description: |
作为技术分析师,你专注于:
- 价格趋势和图表形态
- 技术指标和交易信号
- 市场情绪和资金流向
- 支撑/阻力位和关键价格点
- 中短期交易机会
你倾向于选择能够捕捉价格动态和市场趋势的工具,更偏好技术分析类工具。
sentiment_analyst:
name: "情绪分析师"
focus:
- "市场参与者情绪变化"
- "新闻舆情和媒体影响"
- "内部人交易行为"
- "投资者恐慌和贪婪情绪"
- "市场预期和心理因素"
description: |
作为情绪分析师,你专注于:
- 市场参与者情绪变化
- 新闻舆情和媒体影响
- 内部人交易行为
- 投资者恐慌和贪婪情绪
- 市场预期和心理因素
你倾向于选择能够反映市场情绪和投资者行为的工具,更偏好情绪和行为类工具。
valuation_analyst:
name: "估值分析师"
focus:
- "公司内在价值计算"
- "不同估值方法的比较"
- "估值模型假设和敏感性分析"
- "相对估值和绝对估值"
- "投资安全边际评估"
description: |
作为估值分析师,你专注于:
- 公司内在价值计算
- 不同估值方法的比较
- 估值模型假设和敏感性分析
- 相对估值和绝对估值
- 投资安全边际评估
你倾向于选择能够准确计算公司价值的工具,更偏好估值模型和基本面工具。
comprehensive_analyst:
name: "综合分析师"
focus:
- "整合多种分析视角"
- "平衡短期和长期因素"
- "综合考虑基本面、技术面和情绪面"
- "提供全面的投资建议"
- "适应不同市场环境"
description: |
作为综合分析师,你需要:
- 整合多种分析视角
- 平衡短期和长期因素
- 综合考虑基本面、技术面和情绪面的影响
- 提供全面的投资建议
- 适应不同市场环境
你会根据具体情况灵活选择各类工具,追求分析的全面性和准确性。

View File

@@ -0,0 +1,299 @@
# -*- coding: utf-8 -*-
"""PromptBuilder for constructing system prompts from workspace markdown files.
Based on CoPaw design - loads AGENTS.md, SOUL.md, PROFILE.md, etc. from
agent workspace directories with YAML frontmatter support.
"""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import yaml
logger = logging.getLogger(__name__)
DEFAULT_SYS_PROMPT = """You are a helpful trading analysis assistant."""
class PromptBuilder:
    """Assemble a system prompt from an agent workspace's markdown files.

    Each enabled markdown file becomes one ``## <title>`` section; optional
    YAML frontmatter is stripped from the content and collected as metadata.
    """

    # Files loaded when the caller does not specify a list.
    DEFAULT_FILES = [
        "AGENTS.md",
        "SOUL.md",
        "PROFILE.md",
        "POLICY.md",
        "MEMORY.md",
    ]
    # Human-readable section titles per filename.
    TITLE_MAP: Dict[str, str] = {
        "AGENTS.md": "Agent Guide",
        "SOUL.md": "Soul",
        "PROFILE.md": "Profile",
        "POLICY.md": "Policy",
        "MEMORY.md": "Memory",
        "BOOTSTRAP.md": "Bootstrap",
    }

    def __init__(
        self,
        workspace_dir: Path,
        enabled_files: Optional[List[str]] = None,
    ):
        """Initialize prompt builder.

        Args:
            workspace_dir: Directory containing markdown configuration files
            enabled_files: Filenames to load; defaults to DEFAULT_FILES
        """
        self.workspace_dir = Path(workspace_dir)
        self.enabled_files = enabled_files or self.DEFAULT_FILES.copy()
        self._prompt_parts: List[str] = []
        self._metadata: Dict[str, Any] = {}
        self.loaded_count = 0

    def _load_file(self, filename: str) -> tuple[str, Optional[Dict[str, Any]]]:
        """Read one markdown file and split off its frontmatter.

        Missing, empty, or unreadable files are logged and yield ("", None)
        rather than raising.

        Args:
            filename: Name of the file to load

        Returns:
            Tuple of (content, metadata dict or None)
        """
        source = self.workspace_dir / filename
        if not source.exists():
            logger.debug("File %s not found in %s, skipping", filename, self.workspace_dir)
            return "", None
        try:
            raw = source.read_text(encoding="utf-8").strip()
            if not raw:
                logger.debug("Skipped empty file: %s", filename)
                return "", None
            body, meta = self._parse_frontmatter(raw)
            if body:
                self.loaded_count += 1
                logger.debug("Loaded %s (metadata: %s)", filename, bool(meta))
            return body, meta
        except Exception as e:
            logger.warning("Failed to read file %s: %s, skipping", filename, e)
            return "", None

    def _parse_frontmatter(self, raw_content: str) -> tuple[str, Optional[Dict[str, Any]]]:
        """Split optional leading ``---``-delimited YAML frontmatter.

        Args:
            raw_content: Raw file content

        Returns:
            Tuple of (content without frontmatter, metadata dict or None)
        """
        if not raw_content.startswith("---"):
            return raw_content, None
        pieces = raw_content.split("---", 2)
        if len(pieces) < 3:
            return raw_content, None
        header = pieces[1].strip()
        body = pieces[2].strip()
        try:
            meta = yaml.safe_load(header) or {}
        except yaml.YAMLError as e:
            logger.warning("Failed to parse YAML frontmatter: %s", e)
            return body, None
        # Non-mapping frontmatter (e.g. a bare list) is discarded.
        return body, (meta if isinstance(meta, dict) else {})

    def _append_section(self, title: str, content: str) -> None:
        """Append a ``## <title>`` section for non-blank *content*."""
        body = content.strip()
        if not body:
            return
        if self._prompt_parts:
            self._prompt_parts.append("")
        self._prompt_parts.extend([f"## {title}", "", body])

    def build(self) -> str:
        """Build the system prompt from markdown files.

        Returns:
            Constructed prompt, or DEFAULT_SYS_PROMPT when nothing loads
        """
        self._prompt_parts = []
        self._metadata = {}
        self.loaded_count = 0
        for name in self.enabled_files:
            body, meta = self._load_file(name)
            if meta:
                self._metadata[name] = meta
            if body:
                self._append_section(
                    self.TITLE_MAP.get(name, name.replace(".md", "")),
                    body,
                )
        if not self._prompt_parts:
            logger.warning("No content loaded from workspace: %s", self.workspace_dir)
            return DEFAULT_SYS_PROMPT
        final_prompt = "\n".join(self._prompt_parts)
        logger.debug(
            "System prompt built from %d file(s), total length: %d chars",
            self.loaded_count,
            len(final_prompt),
        )
        return final_prompt

    def get_metadata(self) -> Dict[str, Any]:
        """Return a copy of the per-file frontmatter metadata."""
        return self._metadata.copy()

    def get_agent_identity(self) -> Optional[Dict[str, Any]]:
        """Derive an identity dict from PROFILE.md frontmatter.

        Returns:
            Identity dict with name, role, expertise, style — or None when
            PROFILE.md supplied no metadata
        """
        profile_meta = self._metadata.get("PROFILE.md", {})
        if not profile_meta:
            return None
        return {
            "name": profile_meta.get("name", "Unknown"),
            "role": profile_meta.get("role", ""),
            "expertise": profile_meta.get("expertise", []),
            "style": profile_meta.get("style", ""),
        }
def build_system_prompt_from_workspace(
    workspace_dir: Path,
    enabled_files: Optional[List[str]] = None,
    agent_id: Optional[str] = None,
    extra_context: Optional[str] = None,
) -> str:
    """Build a system prompt from workspace markdown files.

    Main entry point wrapping PromptBuilder: optionally prefixes an agent
    identity header and appends extra context.

    Args:
        workspace_dir: Directory containing markdown configuration files
        enabled_files: Filenames to load (defaults used when None)
        agent_id: Agent identifier to announce in the prompt header
        extra_context: Additional text appended after the built prompt

    Returns:
        Constructed system prompt string
    """
    prompt = PromptBuilder(
        workspace_dir=workspace_dir,
        enabled_files=enabled_files,
    ).build()
    # Prefix the identity header for any non-default agent id.
    if agent_id and agent_id != "default":
        header = (
            f"# Agent Identity\n\n"
            f"Your agent ID is `{agent_id}`. "
            f"This is your unique identifier in the multi-agent system.\n\n"
        )
        prompt = header + prompt
    if extra_context:
        prompt = f"{prompt}\n\n{extra_context}"
    return prompt
def build_bootstrap_guidance(language: str = "zh") -> str:
    """Build the bootstrap guidance banner for first-time setup.

    Args:
        language: Language code; "zh" selects the Chinese banner,
            anything else falls back to English.

    Returns:
        Formatted bootstrap guidance message.
    """
    if language != "zh":
        return (
            "# BOOTSTRAP MODE\n"
            "\n"
            "`BOOTSTRAP.md` exists — first-time setup.\n"
            "\n"
            "1. Read BOOTSTRAP.md, greet the user, "
            "and guide them through setup.\n"
            "2. Follow BOOTSTRAP.md instructions "
            "to define identity and preferences.\n"
            "3. Create/update files "
            "(PROFILE.md, MEMORY.md, etc.) as described.\n"
            "4. Delete BOOTSTRAP.md when done.\n"
            "\n"
            "If the user wants to skip, answer their "
            "question directly instead.\n"
            "\n"
            "---\n"
            "\n"
        )
    return (
        "# 引导模式\n"
        "\n"
        "工作目录中存在 `BOOTSTRAP.md` — 首次设置。\n"
        "\n"
        "1. 阅读 BOOTSTRAP.md友好地表示初次见面"
        "引导用户完成设置。\n"
        "2. 按照 BOOTSTRAP.md 的指示,"
        "帮助用户定义你的身份和偏好。\n"
        "3. 按指南创建/更新必要文件"
        "PROFILE.md、MEMORY.md 等)。\n"
        "4. 完成后删除 BOOTSTRAP.md。\n"
        "\n"
        "如果用户希望跳过,直接回答下面的问题即可。\n"
        "\n"
        "---\n"
        "\n"
    )
# Public names exported by this module.
__all__ = [
    "PromptBuilder",
    "build_system_prompt_from_workspace",
    "build_bootstrap_guidance",
    "DEFAULT_SYS_PROMPT",
]

284
backend/agents/registry.py Normal file
View File

@@ -0,0 +1,284 @@
# -*- coding: utf-8 -*-
"""Agent Registry - In-memory registry for agent management."""
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
@dataclass
class AgentInfo:
    """Descriptor for a single registered agent."""

    agent_id: str      # unique agent identifier
    agent_type: str    # agent category label
    workspace_id: str  # owning workspace
    config_path: str   # path to the agent's configuration file
    agent_dir: str     # path to the agent's directory
    status: str = "inactive"  # inactive, active, error
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this record into a plain dictionary."""
        return dict(
            agent_id=self.agent_id,
            agent_type=self.agent_type,
            workspace_id=self.workspace_id,
            config_path=self.config_path,
            agent_dir=self.agent_dir,
            status=self.status,
            metadata=self.metadata,
        )
class AgentRegistry:
    """In-memory registry tracking agent instances by id and workspace."""

    def __init__(self):
        """Create an empty registry."""
        # agent_id -> AgentInfo
        self._agents: Dict[str, AgentInfo] = {}
        # workspace_id -> set of agent_ids living in that workspace
        self._workspace_index: Dict[str, set] = {}

    def register(
        self,
        agent_id: str,
        agent_type: str,
        workspace_id: str,
        config_path: str,
        agent_dir: str,
        status: str = "inactive",
        metadata: Optional[Dict[str, Any]] = None,
    ) -> AgentInfo:
        """Register an agent in the registry.

        Args:
            agent_id: Unique identifier for the agent.
            agent_type: Type of agent.
            workspace_id: ID of the workspace containing the agent.
            config_path: Path to agent configuration file.
            agent_dir: Path to agent directory.
            status: Initial status (default: inactive).
            metadata: Optional metadata dictionary.

        Returns:
            The newly created AgentInfo.

        Raises:
            ValueError: If agent_id is already registered.
        """
        if agent_id in self._agents:
            raise ValueError(f"Agent '{agent_id}' is already registered")
        info = AgentInfo(
            agent_id=agent_id,
            agent_type=agent_type,
            workspace_id=workspace_id,
            config_path=config_path,
            agent_dir=agent_dir,
            status=status,
            metadata=metadata or {},
        )
        self._agents[agent_id] = info
        # Keep the workspace index in sync with the main table.
        self._workspace_index.setdefault(workspace_id, set()).add(agent_id)
        return info

    def unregister(self, agent_id: str) -> bool:
        """Remove an agent.

        Returns:
            True if removed, False when the agent was never registered.
        """
        info = self._agents.pop(agent_id, None)
        if info is None:
            return False
        members = self._workspace_index.get(info.workspace_id)
        if members is not None:
            members.discard(agent_id)
            # Drop empty workspace buckets so stats stay accurate.
            if not members:
                del self._workspace_index[info.workspace_id]
        return True

    def get(self, agent_id: str) -> Optional[AgentInfo]:
        """Return the AgentInfo for agent_id, or None when unknown."""
        return self._agents.get(agent_id)

    def list_all(
        self,
        workspace_id: Optional[str] = None,
        agent_type: Optional[str] = None,
        status: Optional[str] = None,
    ) -> List[AgentInfo]:
        """List registered agents, optionally filtered.

        Args:
            workspace_id: Keep only agents of this workspace.
            agent_type: Keep only agents of this type.
            status: Keep only agents with this status.

        Returns:
            Matching AgentInfo instances.
        """
        members = (
            self._workspace_index.get(workspace_id, set())
            if workspace_id
            else None
        )
        return [
            item
            for item in self._agents.values()
            if (members is None or item.agent_id in members)
            and (not agent_type or item.agent_type == agent_type)
            and (not status or item.status == status)
        ]

    def update_status(self, agent_id: str, status: str) -> bool:
        """Set the status of one agent; False when the agent is unknown."""
        info = self._agents.get(agent_id)
        if info is None:
            return False
        info.status = status
        return True

    def update_metadata(self, agent_id: str, metadata: Dict[str, Any]) -> bool:
        """Merge metadata into one agent; False when the agent is unknown."""
        info = self._agents.get(agent_id)
        if info is None:
            return False
        info.metadata.update(metadata)
        return True

    def is_registered(self, agent_id: str) -> bool:
        """Return True when agent_id is currently registered."""
        return agent_id in self._agents

    def get_workspace_agents(self, workspace_id: str) -> List[AgentInfo]:
        """Return every agent registered under one workspace."""
        member_ids = self._workspace_index.get(workspace_id, set())
        return [self._agents[aid] for aid in member_ids if aid in self._agents]

    def get_agent_count(self, workspace_id: Optional[str] = None) -> int:
        """Count agents, either globally or within one workspace."""
        if workspace_id:
            return len(self._workspace_index.get(workspace_id, set()))
        return len(self._agents)

    def clear(self) -> None:
        """Forget every registered agent."""
        self._agents.clear()
        self._workspace_index.clear()

    def get_stats(self) -> Dict[str, Any]:
        """Return aggregate registry statistics.

        Returns:
            Dict with total counts plus per-workspace, per-type and
            per-status breakdowns.
        """
        by_type: Dict[str, int] = {}
        by_status: Dict[str, int] = {}
        for info in self._agents.values():
            by_type[info.agent_type] = by_type.get(info.agent_type, 0) + 1
            by_status[info.status] = by_status.get(info.status, 0) + 1
        return {
            "total_agents": len(self._agents),
            "workspaces": len(self._workspace_index),
            "agents_by_workspace": {
                ws_id: len(agent_ids)
                for ws_id, agent_ids in self._workspace_index.items()
            },
            "agents_by_type": by_type,
            "agents_by_status": by_status,
        }
# Process-wide singleton registry instance (created lazily).
_global_registry: Optional[AgentRegistry] = None


def get_registry() -> AgentRegistry:
    """Get the global agent registry instance.

    Creates the singleton on first access.

    Returns:
        AgentRegistry instance
    """
    global _global_registry
    if _global_registry is None:
        _global_registry = AgentRegistry()
    return _global_registry


def reset_registry() -> None:
    """Reset the global registry (useful for testing)."""
    global _global_registry
    _global_registry = None

View File

@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
"""
Risk Manager Agent - Based on AgentScope ReActAgent
Uses LLM for risk assessment
"""
from typing import Any, Dict, Optional
from agentscope.agent import ReActAgent
from agentscope.memory import InMemoryMemory, LongTermMemoryBase
from agentscope.message import Msg
from agentscope.tool import Toolkit
from ..utils.progress import progress
from .prompt_factory import build_agent_system_prompt, clear_prompt_factory_cache
class RiskAgent(ReActAgent):
    """
    Risk Manager Agent - uses an LLM for risk assessment.

    Inherits from AgentScope's ReActAgent.
    """
    def __init__(
        self,
        model: Any,
        formatter: Any,
        name: str = "risk_manager",
        config: Optional[Dict[str, Any]] = None,
        long_term_memory: Optional[LongTermMemoryBase] = None,
        toolkit: Optional[Toolkit] = None,
    ):
        """
        Initialize Risk Manager Agent.

        Args:
            model: LLM model instance
            formatter: Message formatter instance
            name: Agent name (also used as the agent_id)
            config: Configuration dictionary
            long_term_memory: Optional ReMeTaskLongTermMemory instance
            toolkit: Optional pre-built Toolkit; an empty one is created
                when omitted
        """
        # NOTE(review): attributes are set via object.__setattr__ before
        # super().__init__() runs — presumably to bypass attribute handling
        # in the AgentScope base class; confirm against ReActAgent.
        object.__setattr__(self, "config", config or {})
        object.__setattr__(self, "agent_id", name)
        if toolkit is None:
            toolkit = Toolkit()
        object.__setattr__(self, "toolkit", toolkit)
        # The system prompt depends on config/agent_id/toolkit set above.
        sys_prompt = self._load_system_prompt()
        kwargs = {
            "name": name,
            "sys_prompt": sys_prompt,
            "model": model,
            "formatter": formatter,
            "toolkit": toolkit,
            "memory": InMemoryMemory(),
            "max_iters": 10,  # cap on ReAct reasoning/tool iterations
        }
        if long_term_memory:
            kwargs["long_term_memory"] = long_term_memory
            kwargs["long_term_memory_mode"] = "static_control"
        super().__init__(**kwargs)
    def _load_system_prompt(self) -> str:
        """Build this agent's system prompt via the prompt factory."""
        return build_agent_system_prompt(
            agent_id=self.agent_id,
            config_name=self.config.get("config_name", "default"),
            toolkit=self.toolkit,
        )
    async def reply(self, x: Optional[Msg] = None) -> Msg:
        """
        Provide risk assessment.

        Wraps ReActAgent.reply with progress status updates.

        Args:
            x: Input message (content must be str)

        Returns:
            Msg with risk warnings (content is str)
        """
        progress.update_status(self.name, None, "Assessing risk")
        result = await super().reply(x)
        progress.update_status(self.name, None, "Risk assessment completed")
        return result
    def reload_runtime_assets(self, active_skill_dirs: Optional[list] = None) -> None:
        """Reload toolkit and system prompt from current run assets."""
        from .toolkit_factory import create_agent_toolkit
        # Invalidate cached prompt fragments before rebuilding the toolkit
        # so the fresh system prompt reflects the new run assets.
        clear_prompt_factory_cache()
        self.toolkit = create_agent_toolkit(
            self.agent_id,
            self.config.get("config_name", "default"),
            active_skill_dirs=active_skill_dirs,
        )
        self._apply_runtime_sys_prompt(self._load_system_prompt())
    def _apply_runtime_sys_prompt(self, sys_prompt: str) -> None:
        """Update the prompt used by future turns and the cached system msg."""
        self._sys_prompt = sys_prompt
        # Rewrite the first system message already stored in memory so the
        # new prompt also applies to the ongoing conversation.
        # NOTE(review): assumes memory.content yields (msg, marks) pairs —
        # confirm against the InMemoryMemory implementation.
        for msg, _marks in self.memory.content:
            if getattr(msg, "role", None) == "system":
                msg.content = sys_prompt
                break

View File

@@ -0,0 +1,388 @@
# -*- coding: utf-8 -*-
"""Skill loader for loading and validating skills from directories.
提供从目录加载技能、解析SKILL.md frontmatter、获取工具列表等功能。
"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
import yaml
from backend.agents.skill_metadata import SkillMetadata, parse_skill_metadata
logger = logging.getLogger(__name__)
@dataclass
class SkillInfo:
    """Complete information about one loaded skill."""
    name: str          # display name from frontmatter
    description: str   # human-readable description
    version: str       # version string from frontmatter
    source: str        # origin: builtin/customized/local/installed/active
    path: Path         # skill directory on disk
    metadata: SkillMetadata  # parsed frontmatter metadata
    tools: List[str] = field(default_factory=list)       # tool names provided
    scripts: List[str] = field(default_factory=list)     # script paths, relative to scripts/
    references: List[str] = field(default_factory=list)  # reference paths, relative to references/
    content: str = ""  # SKILL.md body with frontmatter removed
def load_skill_from_dir(skill_dir: Path, source: str = "unknown") -> Optional[Dict[str, Any]]:
    """Load a single skill from its directory.

    Args:
        skill_dir: Path to the skill directory.
        source: Origin label (builtin/customized/local/installed/active).

    Returns:
        A dict describing the skill, or None when loading fails.
    """
    if not skill_dir.exists() or not skill_dir.is_dir():
        logger.warning(f"Skill directory does not exist: {skill_dir}")
        return None
    skill_md = skill_dir / "SKILL.md"
    if not skill_md.exists():
        logger.warning(f"SKILL.md not found in: {skill_dir}")
        return None
    try:
        metadata = parse_skill_metadata(skill_dir, source=source)
        content = skill_md.read_text(encoding="utf-8")
        # Strip the YAML frontmatter so only the markdown body remains.
        body = content
        if content.startswith("---"):
            pieces = content.split("---", 2)
            if len(pieces) >= 3:
                body = pieces[2].strip()
        return {
            "name": metadata.name,
            "skill_name": metadata.skill_name,
            "description": metadata.description,
            "version": metadata.version,
            "source": source,
            "path": str(skill_dir),
            "content": body,
            "tools": get_skill_tools(skill_dir),
            "scripts": _get_skill_scripts(skill_dir),
            "references": _get_skill_references(skill_dir),
            "metadata": metadata,
        }
    except Exception as exc:
        logger.error(f"Failed to load skill from {skill_dir}: {exc}")
        return None
def parse_skill_metadata(skill_dir: Path, source: str = "unknown") -> SkillMetadata:
    """Parse skill metadata (backward-compatible delegate).

    Note: this wrapper shadows the same-named import at module scope and
    simply forwards to the canonical implementation.

    Args:
        skill_dir: Path to the skill directory.
        source: Origin label for the skill.

    Returns:
        SkillMetadata instance.
    """
    from backend.agents.skill_metadata import parse_skill_metadata as _impl
    return _impl(skill_dir, source=source)
def get_skill_tools(skill_dir: Path) -> List[str]:
    """Collect the tool names a skill provides.

    Tools come from two places: the ``tools`` field in the SKILL.md YAML
    frontmatter (string or list), and the stems of non-underscore files
    in the skill's ``scripts`` directory.

    Args:
        skill_dir: Path to the skill directory.

    Returns:
        Sorted list of unique tool names.
    """
    tools: Set[str] = set()
    # 1. Read the `tools` field from the SKILL.md frontmatter.
    skill_md = skill_dir / "SKILL.md"
    if skill_md.exists():
        try:
            raw = skill_md.read_text(encoding="utf-8").strip()
            if raw.startswith("---"):
                parts = raw.split("---", 2)
                if len(parts) >= 3:
                    try:
                        frontmatter = yaml.safe_load(parts[1].strip()) or {}
                        if isinstance(frontmatter, dict):
                            tools_field = frontmatter.get("tools", [])
                            if isinstance(tools_field, str):
                                tools.add(tools_field.strip())
                            elif isinstance(tools_field, list):
                                tools.update(
                                    tool.strip()
                                    for tool in tools_field
                                    if isinstance(tool, str)
                                )
                    except yaml.YAMLError:
                        # Malformed frontmatter: fall through to script scan.
                        pass
        except Exception as e:
            logger.warning(f"Failed to parse tools from SKILL.md: {e}")
    # 2. Infer tools from scripts: each file stem is a tool name;
    #    underscore-prefixed files are private helpers.
    scripts_dir = skill_dir / "scripts"
    if scripts_dir.exists() and scripts_dir.is_dir():
        for script in scripts_dir.iterdir():
            if script.is_file() and not script.name.startswith("_"):
                tools.add(script.stem)
    # sorted() accepts the set directly; the intermediate list() was redundant.
    return sorted(tools)
def _get_skill_scripts(skill_dir: Path) -> List[str]:
"""获取技能脚本列表
Args:
skill_dir: 技能目录路径
Returns:
脚本相对路径列表 (相对于scripts目录)
"""
scripts: List[str] = []
scripts_dir = skill_dir / "scripts"
if not scripts_dir.exists():
return scripts
try:
for item in scripts_dir.rglob("*"):
if item.is_file() and not item.name.startswith("_"):
rel_path = item.relative_to(scripts_dir)
scripts.append(str(rel_path))
except Exception as e:
logger.warning(f"Failed to list scripts in {skill_dir}: {e}")
return sorted(scripts)
def _get_skill_references(skill_dir: Path) -> List[str]:
"""获取技能参考资料列表
Args:
skill_dir: 技能目录路径
Returns:
参考资料相对路径列表 (相对于references目录)
"""
refs: List[str] = []
refs_dir = skill_dir / "references"
if not refs_dir.exists():
return refs
try:
for item in refs_dir.rglob("*"):
if item.is_file():
rel_path = item.relative_to(refs_dir)
refs.append(str(rel_path))
except Exception as e:
logger.warning(f"Failed to list references in {skill_dir}: {e}")
return sorted(refs)
def validate_skill(skill_dir: Path) -> Dict[str, Any]:
    """Validate a skill directory layout.

    Checks the directory structure and the SKILL.md frontmatter/body
    against the expected skill format.

    Args:
        skill_dir: Path to the skill directory.

    Returns:
        Dict with keys:
        - valid: True when no errors were found
        - errors: list of fatal problems
        - warnings: list of non-fatal issues
    """
    errors: List[str] = []
    warnings: List[str] = []
    # Directory-level checks are fatal and short-circuit.
    if not skill_dir.exists():
        errors.append(f"Skill directory does not exist: {skill_dir}")
        return {"valid": False, "errors": errors, "warnings": warnings}
    if not skill_dir.is_dir():
        errors.append(f"Path is not a directory: {skill_dir}")
        return {"valid": False, "errors": errors, "warnings": warnings}
    skill_md = skill_dir / "SKILL.md"
    if not skill_md.exists():
        errors.append("SKILL.md is required but not found")
        return {"valid": False, "errors": errors, "warnings": warnings}
    # Read SKILL.md once and reuse the text for both the frontmatter and
    # the body checks (it was previously read from disk twice).
    raw: Optional[str] = None
    try:
        raw = skill_md.read_text(encoding="utf-8")
    except Exception as e:
        errors.append(f"Failed to read SKILL.md: {e}")
    if raw is not None:
        stripped = raw.strip()
        if not stripped.startswith("---"):
            warnings.append("SKILL.md should have YAML frontmatter (starts with ---)")
        else:
            parts = stripped.split("---", 2)
            if len(parts) < 3:
                errors.append("Invalid YAML frontmatter format")
            else:
                try:
                    frontmatter = yaml.safe_load(parts[1].strip()) or {}
                    if not isinstance(frontmatter, dict):
                        errors.append("YAML frontmatter must be a dictionary")
                    else:
                        # Schema is lenient: missing fields are warnings only.
                        if "name" not in frontmatter:
                            warnings.append("Frontmatter should have 'name' field")
                        if "description" not in frontmatter:
                            warnings.append("Frontmatter should have 'description' field")
                        version = frontmatter.get("version")
                        if version and not isinstance(version, str):
                            warnings.append("'version' should be a string")
                        tools = frontmatter.get("tools")
                        if tools and not isinstance(tools, (str, list)):
                            warnings.append("'tools' should be a string or list")
                except yaml.YAMLError as e:
                    errors.append(f"Invalid YAML in frontmatter: {e}")
        # Body check: everything after the frontmatter block.
        body = raw
        if raw.startswith("---"):
            parts = raw.split("---", 2)
            if len(parts) >= 3:
                body = parts[2].strip()
        if not body:
            warnings.append("SKILL.md body is empty")
        elif len(body) < 50:
            warnings.append("SKILL.md body is very short, consider adding more details")
    # Optional scripts directory.
    scripts_dir = skill_dir / "scripts"
    if scripts_dir.exists():
        if not scripts_dir.is_dir():
            errors.append("'scripts' exists but is not a directory")
        else:
            # Underscore-prefixed files are private helpers, not scripts.
            has_scripts = any(
                f.is_file() and not f.name.startswith("_")
                for f in scripts_dir.iterdir()
            )
            if not has_scripts:
                warnings.append("scripts directory exists but contains no valid scripts")
    # Optional references directory.
    refs_dir = skill_dir / "references"
    if refs_dir.exists() and not refs_dir.is_dir():
        errors.append("'references' exists but is not a directory")
    return {
        "valid": len(errors) == 0,
        "errors": errors,
        "warnings": warnings,
    }
def load_skills_from_directory(
    directory: Path,
    source: str = "unknown",
    recursive: bool = False,
) -> List[Dict[str, Any]]:
    """Load every skill found under a parent directory.

    Args:
        directory: Parent directory containing skill folders.
        source: Origin label attached to each loaded skill.
        recursive: Whether to descend into non-skill subdirectories.

    Returns:
        List of skill-info dicts (possibly empty).
    """
    if not directory.exists() or not directory.is_dir():
        logger.warning(f"Directory does not exist: {directory}")
        return []
    collected: List[Dict[str, Any]] = []
    try:
        for child in directory.iterdir():
            if not child.is_dir():
                continue
            if (child / "SKILL.md").exists():
                # A directory holding SKILL.md is itself a skill.
                info = load_skill_from_dir(child, source=source)
                if info:
                    collected.append(info)
            elif recursive:
                collected.extend(
                    load_skills_from_directory(child, source, recursive)
                )
    except Exception as e:
        logger.error(f"Failed to load skills from {directory}: {e}")
    return collected
def get_skill_manifest(skill_dir: Path) -> Dict[str, Any]:
    """Produce a detailed manifest for one skill (debugging/display).

    Args:
        skill_dir: Path to the skill directory.

    Returns:
        Manifest dict, or ``{"error": ...}`` when the skill cannot be loaded.
    """
    info = load_skill_from_dir(skill_dir)
    if not info:
        return {"error": "Failed to load skill"}
    validation = validate_skill(skill_dir)
    content = info["content"]
    # Truncate long bodies so the manifest stays readable.
    preview = content[:500] + "..." if len(content) > 500 else content
    return {
        "name": info["name"],
        "skill_name": info["skill_name"],
        "version": info["version"],
        "description": info["description"],
        "source": info["source"],
        "path": info["path"],
        "tools": info["tools"],
        "scripts": info["scripts"],
        "references": info["references"],
        "validation": validation,
        "content_preview": preview,
    }

View File

@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
"""Skill metadata parsing helpers for SKILL.md files."""
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
import yaml
@dataclass(frozen=True)
class SkillMetadata:
    """Parsed metadata for a skill package."""
    skill_name: str  # directory name of the skill
    path: Path       # skill directory on disk
    source: str      # origin label (builtin/customized/local/...)
    name: str        # frontmatter `name`, falling back to the directory name
    description: str  # frontmatter `description`, or first body line as fallback
    version: str = ""  # frontmatter `version`; empty when absent
    tools: List[str] = field(default_factory=list)          # frontmatter `tools`
    allowed_tools: List[str] = field(default_factory=list)  # frontmatter `allowed_tools`
    denied_tools: List[str] = field(default_factory=list)   # frontmatter `denied_tools`
def parse_skill_metadata(skill_dir: Path, source: str) -> SkillMetadata:
    """Parse SKILL.md frontmatter with a forgiving schema.

    Missing files, malformed YAML, and non-dict frontmatter all degrade
    to sensible defaults instead of raising.

    Args:
        skill_dir: Path to the skill directory.
        source: Origin label recorded on the metadata.

    Returns:
        SkillMetadata with parsed (or defaulted) fields.
    """
    skill_name = skill_dir.name
    skill_file = skill_dir / "SKILL.md"
    if not skill_file.exists():
        # No SKILL.md at all: return a bare-bones record.
        return SkillMetadata(
            skill_name=skill_name,
            path=skill_dir,
            source=source,
            name=skill_name,
            description="",
        )
    raw = skill_file.read_text(encoding="utf-8").strip()
    frontmatter: dict = {}
    body = raw
    if raw.startswith("---"):
        pieces = raw.split("---", 2)
        if len(pieces) >= 3:
            try:
                frontmatter = yaml.safe_load(pieces[1].strip()) or {}
            except yaml.YAMLError:
                frontmatter = {}
            body = pieces[2].strip()
    if not isinstance(frontmatter, dict):
        frontmatter = {}
    description = str(frontmatter.get("description") or "").strip()
    if not description and body:
        # Fall back to the first body line, minus markdown heading markers.
        description = body.splitlines()[0].strip().lstrip("#").strip()
    return SkillMetadata(
        skill_name=skill_name,
        path=skill_dir,
        source=source,
        name=str(frontmatter.get("name") or skill_name).strip() or skill_name,
        description=description,
        version=str(frontmatter.get("version") or "").strip(),
        tools=_string_list(frontmatter.get("tools")),
        allowed_tools=_string_list(frontmatter.get("allowed_tools")),
        denied_tools=_string_list(frontmatter.get("denied_tools")),
    )
def _string_list(value) -> List[str]:
if isinstance(value, str):
item = value.strip()
return [item] if item else []
if not isinstance(value, list):
return []
seen: List[str] = []
for item in value:
if not isinstance(item, str):
continue
normalized = item.strip()
if normalized and normalized not in seen:
seen.append(normalized)
return seen

View File

@@ -0,0 +1,876 @@
# -*- coding: utf-8 -*-
"""Manage agent-installed and run-active skill directories for each run."""
from pathlib import Path
import shutil
import tempfile
import zipfile
from threading import Lock
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set
from urllib.parse import urlparse
from urllib.request import urlretrieve
import yaml
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skill_metadata import SkillMetadata, parse_skill_metadata
from backend.agents.skill_loader import validate_skill
from backend.config.bootstrap_config import get_bootstrap_config_for_run
try:
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileSystemEvent
WATCHDOG_AVAILABLE = True
except ImportError:
WATCHDOG_AVAILABLE = False
Observer = None
FileSystemEventHandler = object
FileSystemEvent = object # type: ignore[misc,assignment]
class SkillsManager:
"""Sync named skills into a run-scoped active skills workspace."""
def __init__(self, project_root: Path | None = None):
    """Create a manager rooted at the project directory.

    Args:
        project_root: Repository root; when omitted it is inferred from
            this file's location (three levels up).
    """
    self.project_root = (
        project_root or Path(__file__).resolve().parents[2]
    )
    # Shared skill sources checked into the repository.
    self.builtin_root = self.project_root / "backend" / "skills" / "builtin"
    self.customized_root = (
        self.project_root / "backend" / "skills" / "customized"
    )
    # Per-run assets live under runs/<config_name>/.
    self.runs_root = self.project_root / "runs"
    self._lock = Lock()
    # Instance-level pending skill changes (thread-safe via self._lock)
    self._pending_skill_changes: Dict[str, Set[Path]] = {}
def get_active_root(self, config_name: str) -> Path:
    """Run-level directory of currently active (synced) skills."""
    return self.runs_root / config_name / "skills" / "active"
def get_agent_skills_root(self, config_name: str, agent_id: str) -> Path:
    """Root of one agent's skill folders within a run."""
    return self.get_agent_asset_dir(config_name, agent_id) / "skills"
def get_agent_active_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-scoped directory of active skills."""
    return self.get_agent_skills_root(config_name, agent_id) / "active"
def get_agent_installed_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-scoped directory of installed (external) skills."""
    return self.get_agent_skills_root(config_name, agent_id) / "installed"
def get_agent_disabled_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-scoped directory of disabled skills."""
    return self.get_agent_skills_root(config_name, agent_id) / "disabled"
def get_agent_local_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-scoped directory of agent-private local skills."""
    return self.get_agent_skills_root(config_name, agent_id) / "local"
def get_activation_manifest_path(self, config_name: str) -> Path:
    """Path of the run's skill activation manifest (YAML)."""
    return self.runs_root / config_name / "skills" / "activation.yaml"
def get_agent_asset_dir(self, config_name: str, agent_id: str) -> Path:
    """Run-scoped asset directory for one agent."""
    return self.runs_root / config_name / "agents" / agent_id
def list_skill_catalog(self) -> List[SkillMetadata]:
    """Return builtin/customized skills with parsed metadata.

    Customized skills shadow builtin ones with the same name because
    they are scanned second.
    """
    catalog: Dict[str, SkillMetadata] = {}
    roots = (
        ("builtin", self.builtin_root),
        ("customized", self.customized_root),
    )
    for source, root in roots:
        if not root.exists():
            continue
        for candidate in sorted(root.iterdir(), key=lambda entry: entry.name):
            if candidate.is_dir() and (candidate / "SKILL.md").exists():
                meta = parse_skill_metadata(candidate, source=source)
                catalog[meta.skill_name] = meta
    return sorted(catalog.values(), key=lambda entry: entry.skill_name)
def list_agent_skill_catalog(
    self,
    config_name: str,
    agent_id: str,
) -> List[SkillMetadata]:
    """Return shared plus agent-local skills for one agent.

    Agent-local skills shadow shared skills with the same name.
    """
    merged: Dict[str, SkillMetadata] = {}
    for meta in self.list_skill_catalog():
        merged[meta.skill_name] = meta
    for meta in self.list_agent_local_skills(config_name, agent_id):
        merged[meta.skill_name] = meta
    return sorted(merged.values(), key=lambda entry: entry.skill_name)
def list_active_skill_metadata(
    self,
    config_name: str,
    agent_id: str,
) -> List[SkillMetadata]:
    """Return metadata for active skills synced for one agent."""
    active_root = self.get_agent_active_root(config_name, agent_id)
    if not active_root.exists():
        return []
    # Only directories that actually contain SKILL.md count as skills.
    return [
        parse_skill_metadata(entry, source="active")
        for entry in sorted(active_root.iterdir(), key=lambda item: item.name)
        if entry.is_dir() and (entry / "SKILL.md").exists()
    ]
def list_agent_local_skills(
    self,
    config_name: str,
    agent_id: str,
) -> List[SkillMetadata]:
    """Return metadata for agent-private local skills."""
    local_root = self.get_agent_local_root(config_name, agent_id)
    if not local_root.exists():
        return []
    # Only directories that actually contain SKILL.md count as skills.
    return [
        parse_skill_metadata(entry, source="local")
        for entry in sorted(local_root.iterdir(), key=lambda item: item.name)
        if entry.is_dir() and (entry / "SKILL.md").exists()
    ]
def load_skill_document(self, skill_name: str) -> Dict[str, object]:
    """Return skill metadata plus markdown body for one shared skill."""
    source_dir = self._resolve_source_dir(skill_name)
    # Classify by parent root: anything not customized is builtin.
    source = (
        "customized"
        if source_dir.parent == self.customized_root
        else "builtin"
    )
    return self._load_skill_document_from_dir(source_dir, source=source)
def load_agent_skill_document(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> Dict[str, object]:
    """Return skill metadata plus markdown body for one agent-visible skill."""
    source_dir = self._resolve_agent_skill_source_dir(
        config_name=config_name,
        agent_id=agent_id,
        skill_name=skill_name,
    )
    # Classify the resolved directory by its parent root; anything that is
    # not a shared or installed root is an agent-local skill.
    parent = source_dir.parent
    if parent == self.customized_root:
        source = "customized"
    elif parent == self.builtin_root:
        source = "builtin"
    elif parent == self.get_agent_installed_root(config_name, agent_id):
        source = "installed"
    else:
        source = "local"
    return self._load_skill_document_from_dir(source_dir, source=source)
def create_agent_local_skill(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> Path:
    """Create a new local skill directory with a default SKILL.md.

    Args:
        config_name: Run configuration name.
        agent_id: Owning agent id.
        skill_name: Desired skill name (normalized before use).

    Returns:
        Path of the newly created skill directory.

    Raises:
        ValueError: If the normalized skill name is empty.
        FileExistsError: If a local skill with that name already exists.
    """
    normalized = _normalize_skill_name(skill_name)
    if not normalized:
        raise ValueError("Skill name is required.")
    local_root = self.get_agent_local_root(config_name, agent_id)
    local_root.mkdir(parents=True, exist_ok=True)
    skill_dir = local_root / normalized
    if skill_dir.exists():
        raise FileExistsError(f"Local skill already exists: {normalized}")
    skill_dir.mkdir(parents=True, exist_ok=False)
    # Seed the skill with a minimal SKILL.md template (Chinese default copy).
    (skill_dir / "SKILL.md").write_text(
        "---\n"
        f"name: {normalized}\n"
        "description: 当用户提出与该本地技能相关的专门任务时,应使用此技能。\n"
        "version: 1.0.0\n"
        "---\n\n"
        f"# {normalized}\n\n"
        "在这里描述该交易员的专有分析流程、判断框架和可复用步骤。\n",
        encoding="utf-8",
    )
    return skill_dir
def install_external_skill_for_agent(
    self,
    config_name: str,
    agent_id: str,
    source: str,
    *,
    skill_name: str | None = None,
    activate: bool = True,
) -> Dict[str, object]:
    """
    Install an external skill into one agent's local skill space.

    Supports:
    - local skill directory containing SKILL.md
    - local zip archive containing one skill directory
    - http(s) URL to zip archive

    Args:
        config_name: Run configuration name.
        agent_id: Target agent id.
        source: Directory path, zip path, or http(s) URL.
        skill_name: Optional override for the installed skill's name.
        activate: Whether to enable the skill right after installing.

    Returns:
        Dict with the final skill name, target directory, activation
        flag, and any validation warnings.

    Raises:
        ValueError: If the name cannot be determined, or the installed
            skill fails validation (the copy is rolled back).
    """
    source_path = self._resolve_external_source_path(source)
    skill_dir = self._resolve_external_skill_dir(source_path)
    metadata = parse_skill_metadata(skill_dir, source="external")
    # Name preference: explicit override > frontmatter > directory name.
    final_name = _normalize_skill_name(skill_name or metadata.skill_name or skill_dir.name)
    if not final_name:
        raise ValueError("Could not determine skill name from external source.")
    target_dir = self.get_agent_local_root(config_name, agent_id) / final_name
    target_dir.parent.mkdir(parents=True, exist_ok=True)
    # Reinstalling replaces any previous copy of the same skill.
    if target_dir.exists():
        shutil.rmtree(target_dir)
    shutil.copytree(skill_dir, target_dir)
    validation = validate_skill(target_dir)
    if not validation.get("valid", False):
        # Roll back the copy so a broken skill never lingers on disk.
        shutil.rmtree(target_dir, ignore_errors=True)
        raise ValueError(
            "Installed skill is invalid: "
            + "; ".join(validation.get("errors", []))
        )
    if activate:
        self.update_agent_skill_overrides(
            config_name=config_name,
            agent_id=agent_id,
            enable=[final_name],
        )
    return {
        "skill_name": final_name,
        "target_dir": str(target_dir),
        "activated": activate,
        "warnings": validation.get("warnings", []),
    }
def update_agent_local_skill(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
    content: str,
) -> Path:
    """Overwrite one agent-local SKILL.md with new content.

    Raises:
        ValueError: If the normalized skill name is empty.
        FileNotFoundError: If no such local skill exists.
    """
    normalized = _normalize_skill_name(skill_name)
    if not normalized:
        raise ValueError("Skill name is required.")
    skill_dir = self.get_agent_local_root(config_name, agent_id) / normalized
    if not skill_dir.exists():
        raise FileNotFoundError(f"Unknown local skill: {normalized}")
    target = skill_dir / "SKILL.md"
    target.write_text(content, encoding="utf-8")
    return skill_dir
def delete_agent_local_skill(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> None:
    """Delete one agent-local skill directory.

    Raises:
        ValueError: If the normalized skill name is empty.
        FileNotFoundError: If no such local skill exists.
    """
    normalized = _normalize_skill_name(skill_name)
    if not normalized:
        raise ValueError("Skill name is required.")
    skill_dir = self.get_agent_local_root(config_name, agent_id) / normalized
    if not skill_dir.exists():
        raise FileNotFoundError(f"Unknown local skill: {normalized}")
    shutil.rmtree(skill_dir)
def _load_skill_document_from_dir(
    self,
    source_dir: Path,
    *,
    source: str,
) -> Dict[str, object]:
    """Return metadata plus markdown body for one resolved skill directory."""
    metadata = parse_skill_metadata(source_dir, source=source)
    skill_file = source_dir / "SKILL.md"
    raw = ""
    if skill_file.exists():
        raw = skill_file.read_text(encoding="utf-8").strip()
    # Drop the YAML frontmatter; keep only the markdown body.
    body = raw
    if raw.startswith("---"):
        pieces = raw.split("---", 2)
        if len(pieces) >= 3:
            body = pieces[2].strip()
    return {
        "skill_name": metadata.skill_name,
        "name": metadata.name,
        "description": metadata.description,
        "version": metadata.version,
        "tools": metadata.tools,
        "source": metadata.source,
        "content": body,
    }
def _resolve_external_source_path(self, source: str) -> Path:
    """Resolve source into a local path; download URL when needed.

    http(s) sources are downloaded into a NamedTemporaryFile created
    with delete=False so it survives past this call; the temp file is
    not cleaned up here.

    NOTE(review): urlretrieve is a legacy interface and the URL is
    fetched without validation — acceptable only for trusted sources.
    """
    parsed = urlparse(source)
    if parsed.scheme in {"http", "https"}:
        # Preserve the remote file extension; default to .zip.
        suffix = Path(parsed.path).suffix or ".zip"
        with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
            temp_path = Path(tmp.name)
        urlretrieve(source, temp_path)
        return temp_path
    # Local paths are expanded (~) and made absolute.
    return Path(source).expanduser().resolve()
def _resolve_external_skill_dir(self, source_path: Path) -> Path:
    """Resolve an external source path to a directory containing SKILL.md.

    Accepts a skill directory, a directory with exactly one skill child,
    or a .zip archive holding exactly one skill directory.

    Raises:
        FileNotFoundError: If the source path does not exist.
        ValueError: If the layout is ambiguous or the file is not a zip.
    """
    if not source_path.exists():
        raise FileNotFoundError(f"Source does not exist: {source_path}")
    if source_path.is_dir():
        if (source_path / "SKILL.md").exists():
            return source_path
        skill_children = [
            child for child in source_path.iterdir()
            if child.is_dir() and (child / "SKILL.md").exists()
        ]
        if len(skill_children) == 1:
            return skill_children[0]
        raise ValueError(
            "Source directory must contain SKILL.md "
            "or exactly one child directory containing SKILL.md."
        )
    if source_path.suffix.lower() != ".zip":
        raise ValueError("External source file must be a .zip archive.")
    # NOTE: extraction relies on zipfile's own path sanitization for
    # archive member names.
    extract_root = Path(tempfile.mkdtemp(prefix="external_skill_"))
    with zipfile.ZipFile(source_path, "r") as archive:
        archive.extractall(extract_root)
    # Ordered dedupe of every directory that holds a SKILL.md marker.
    skill_dirs = list(dict.fromkeys(
        marker.parent
        for marker in extract_root.rglob("SKILL.md")
        if marker.is_file()
    ))
    if len(skill_dirs) != 1:
        raise ValueError(
            "Zip archive must contain exactly one skill directory with SKILL.md."
        )
    return skill_dirs[0]
def update_agent_skill_overrides(
self,
config_name: str,
agent_id: str,
*,
enable: Iterable[str] | None = None,
disable: Iterable[str] | None = None,
) -> Dict[str, List[str]]:
"""Persist per-agent enabled/disabled skill overrides in agent.yaml."""
asset_dir = self.get_agent_asset_dir(config_name, agent_id)
asset_dir.mkdir(parents=True, exist_ok=True)
config_path = asset_dir / "agent.yaml"
current = load_agent_workspace_config(config_path)
values = dict(current.values)
enabled = _dedupe_preserve_order(current.enabled_skills)
disabled_set = set(current.disabled_skills)
for skill_name in enable or []:
if skill_name not in enabled:
enabled.append(skill_name)
disabled_set.discard(skill_name)
for skill_name in disable or []:
disabled_set.add(skill_name)
enabled = [item for item in enabled if item != skill_name]
values["enabled_skills"] = enabled
values["disabled_skills"] = sorted(disabled_set)
config_path.write_text(
yaml.safe_dump(values, allow_unicode=True, sort_keys=False),
encoding="utf-8",
)
return {
"enabled_skills": enabled,
"disabled_skills": sorted(disabled_set),
}
def forget_agent_skill_overrides(
self,
config_name: str,
agent_id: str,
skill_names: Iterable[str],
) -> Dict[str, List[str]]:
"""Remove skills from both enabled/disabled overrides in agent.yaml."""
asset_dir = self.get_agent_asset_dir(config_name, agent_id)
asset_dir.mkdir(parents=True, exist_ok=True)
config_path = asset_dir / "agent.yaml"
current = load_agent_workspace_config(config_path)
values = dict(current.values)
removed = set(skill_names)
enabled = [item for item in current.enabled_skills if item not in removed]
disabled = [item for item in current.disabled_skills if item not in removed]
values["enabled_skills"] = enabled
values["disabled_skills"] = disabled
config_path.write_text(
yaml.safe_dump(values, allow_unicode=True, sort_keys=False),
encoding="utf-8",
)
return {
"enabled_skills": enabled,
"disabled_skills": disabled,
}
def ensure_activation_manifest(self, config_name: str) -> Path:
manifest_path = self.get_activation_manifest_path(config_name)
manifest_path.parent.mkdir(parents=True, exist_ok=True)
if not manifest_path.exists():
manifest_path.write_text(
"global_enabled_skills: []\n"
"global_disabled_skills: []\n"
"agent_enabled_skills: {}\n"
"agent_disabled_skills: {}\n",
encoding="utf-8",
)
return manifest_path
def load_activation_manifest(self, config_name: str) -> Dict[str, object]:
manifest_path = self.ensure_activation_manifest(config_name)
with open(manifest_path, "r", encoding="utf-8") as file:
parsed = yaml.safe_load(file) or {}
return parsed if isinstance(parsed, dict) else {}
def _resolve_source_dir(self, skill_name: str) -> Path:
customized_dir = self.customized_root / skill_name
if customized_dir.exists():
return customized_dir
builtin_dir = self.builtin_root / skill_name
if builtin_dir.exists():
return builtin_dir
raise FileNotFoundError(f"Unknown skill: {skill_name}")
def _resolve_agent_skill_source_dir(
self,
config_name: str,
agent_id: str,
skill_name: str,
) -> Path:
"""Resolve one skill from the agent-local workspace or shared registry."""
for root in (
self.get_agent_local_root(config_name, agent_id),
self.get_agent_installed_root(config_name, agent_id),
):
candidate = root / skill_name
if candidate.exists() and (candidate / "SKILL.md").exists():
return candidate
return self._resolve_source_dir(skill_name)
def _skill_exists_for_agent(
self,
config_name: str,
agent_id: str,
skill_name: str,
) -> bool:
try:
self._resolve_agent_skill_source_dir(config_name, agent_id, skill_name)
except FileNotFoundError:
return False
return True
def _persist_runtime_edits(
self,
config_name: str,
skill_name: str,
active_dir: Path,
) -> None:
"""
Persist run-time edits from active skills into customized skills.
This keeps active skill experiments from being lost on the next reload
while still allowing the active directory to be re-synced cleanly.
"""
if not active_dir.exists():
return
source_dir = self._resolve_source_dir(skill_name)
if active_dir.resolve() == source_dir.resolve():
return
if not self._directories_match(active_dir, source_dir):
customized_dir = self.customized_root / skill_name
customized_dir.parent.mkdir(parents=True, exist_ok=True)
if customized_dir.exists():
shutil.rmtree(customized_dir)
shutil.copytree(active_dir, customized_dir)
@staticmethod
def _directories_match(left: Path, right: Path) -> bool:
"""Compare two directory trees by file contents."""
if not left.exists() or not right.exists():
return False
left_items = sorted(
path.relative_to(left)
for path in left.rglob("*")
)
right_items = sorted(
path.relative_to(right)
for path in right.rglob("*")
)
if left_items != right_items:
return False
for relative_path in left_items:
left_path = left / relative_path
right_path = right / relative_path
if left_path.is_dir() != right_path.is_dir():
return False
if left_path.is_file():
if left_path.read_bytes() != right_path.read_bytes():
return False
return True
    def resolve_agent_skill_names(
        self,
        config_name: str,
        agent_id: str,
        default_skills: Iterable[str],
    ) -> List[str]:
        """Resolve final skill names after bootstrap and activation overlays.

        Layers append in order (first occurrence wins on position):
        1. bootstrap agent override, falling back to *default_skills*;
        2. agent.yaml enabled_skills;
        3. manifest global_enabled_skills, then agent_enabled_skills;
        4. skills found in the agent-local workspace on disk.
        Finally, names in any disabled layer are filtered out, as are names
        that do not resolve to an existing skill for this agent.
        """
        bootstrap = get_bootstrap_config_for_run(self.project_root, config_name)
        override = bootstrap.agent_override(agent_id)
        skills = list(override.get("skills", list(default_skills)))
        agent_config = load_agent_workspace_config(
            self.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
        )
        for skill_name in agent_config.enabled_skills:
            if skill_name not in skills:
                skills.append(skill_name)
        manifest = self.load_activation_manifest(config_name)
        for skill_name in manifest.get("global_enabled_skills", []):
            if skill_name not in skills:
                skills.append(skill_name)
        for skill_name in manifest.get("agent_enabled_skills", {}).get(agent_id, []):
            if skill_name not in skills:
                skills.append(skill_name)
        # The union of all disabled layers wins over any enable above.
        disabled = set(manifest.get("global_disabled_skills", []))
        disabled.update(
            manifest.get("agent_disabled_skills", {}).get(agent_id, []),
        )
        disabled.update(agent_config.disabled_skills)
        # Agent-local skills on disk are always candidates.
        for item in self.list_agent_local_skills(config_name, agent_id):
            if item.skill_name not in skills:
                skills.append(item.skill_name)
        return [
            skill
            for skill in skills
            if skill not in disabled
            and self._skill_exists_for_agent(config_name, agent_id, skill)
        ]
def sync_skill_dirs(
self,
target_root: Path,
skill_sources: Dict[str, Path],
) -> List[Path]:
"""Sync selected skill directories into one target root."""
target_root.mkdir(parents=True, exist_ok=True)
synced_paths: List[Path] = []
wanted = set(skill_sources)
for existing in target_root.iterdir():
if existing.is_dir() and existing.name not in wanted:
shutil.rmtree(existing)
for skill_name, source_dir in skill_sources.items():
target_dir = target_root / skill_name
if target_dir.exists():
shutil.rmtree(target_dir)
shutil.copytree(source_dir, target_dir)
synced_paths.append(target_dir)
return synced_paths
def sync_active_skills(
self,
target_root: Path,
skill_names: Iterable[str],
) -> List[Path]:
"""Sync selected shared skills into one active directory."""
skill_sources = {
skill_name: self._resolve_source_dir(skill_name)
for skill_name in skill_names
}
return self.sync_skill_dirs(target_root, skill_sources)
    def prepare_active_skills(
        self,
        config_name: str,
        agent_defaults: Dict[str, Iterable[str]],
        auto_reload: bool = False,
    ) -> Dict[str, List[Path]]:
        """Resolve all agent skills into per-agent installed/active workspaces.

        Args:
            config_name: Run configuration name.
            agent_defaults: Map of agent_id -> default skill names.
            auto_reload: When True, also start a filesystem watcher on the
                resulting active/local directories.

        Returns:
            Map of agent_id -> list of synced active skill paths.
        """
        resolved: Dict[str, List[str]] = {}
        union: List[str] = []
        # Resolve each agent's effective skill list and collect the union.
        for agent_id, default_skills in agent_defaults.items():
            resolved_skills = self.resolve_agent_skill_names(
                config_name=config_name,
                agent_id=agent_id,
                default_skills=default_skills,
            )
            resolved[agent_id] = resolved_skills
            for skill_name in resolved_skills:
                if skill_name not in union:
                    union.append(skill_name)
        # Maintain the legacy union directory for compatibility/debugging.
        # Agent-local skills remain private to the agent workspace.
        self.sync_active_skills(
            target_root=self.get_active_root(config_name),
            skill_names=[
                skill_name
                for skill_name in union
                if self._is_shared_skill(skill_name)
            ],
        )
        active_map: Dict[str, List[Path]] = {}
        for agent_id, skill_names in resolved.items():
            # Mirror shared skills into the agent's installed root; skills
            # that also exist agent-locally are deliberately excluded so the
            # local copy takes precedence below.
            installed_sources = {
                skill_name: self._resolve_source_dir(skill_name)
                for skill_name in skill_names
                if (self.get_agent_local_root(config_name, agent_id) / skill_name).exists() is False
            }
            installed_paths = self.sync_skill_dirs(
                target_root=self.get_agent_installed_root(config_name, agent_id),
                skill_sources=installed_sources,
            )
            local_root = self.get_agent_local_root(config_name, agent_id)
            local_sources = {
                skill_name: local_root / skill_name
                for skill_name in skill_names
                if (local_root / skill_name).exists()
            }
            # Local skills override installed ones with the same name.
            active_sources = {
                path.name: path for path in installed_paths
            }
            active_sources.update(local_sources)
            active_map[agent_id] = self.sync_skill_dirs(
                target_root=self.get_agent_active_root(config_name, agent_id),
                skill_sources=active_sources,
            )
            # Mirror explicitly disabled skills into their own directory so
            # they remain inspectable without being loaded.
            disabled_names = _dedupe_preserve_order(
                self._resolve_disabled_skill_names(
                    config_name=config_name,
                    agent_id=agent_id,
                    default_skills=agent_defaults.get(agent_id, []),
                ),
            )
            disabled_sources = {
                skill_name: self._resolve_agent_skill_source_dir(
                    config_name=config_name,
                    agent_id=agent_id,
                    skill_name=skill_name,
                )
                for skill_name in disabled_names
            }
            self.sync_skill_dirs(
                target_root=self.get_agent_disabled_root(config_name, agent_id),
                skill_sources=disabled_sources,
            )
        if auto_reload:
            self.watch_active_skills(config_name, agent_defaults)
        return active_map
def _is_shared_skill(self, skill_name: str) -> bool:
try:
self._resolve_source_dir(skill_name)
except FileNotFoundError:
return False
return True
    def watch_active_skills(
        self,
        config_name: str,
        agent_defaults: Dict[str, Iterable[str]],
        callback: Optional[Any] = None,
    ) -> "_SkillsWatcher":
        """Start file system monitoring on active skill directories.

        Args:
            config_name: Run configuration name.
            agent_defaults: Map of agent_id -> default skill names.
            callback: Optional callable invoked on file changes with
                (changed_paths: List[Path]).

        Returns:
            A _SkillsWatcher instance. Call .stop() to halt monitoring.

        Raises:
            ImportError: If the optional watchdog dependency is not installed.
        """
        if not WATCHDOG_AVAILABLE:
            raise ImportError(
                "watchdog is required for watch_active_skills. "
                "Install it with: pip install watchdog"
            )
        # Watch both the synced active roots and the agent-local roots;
        # directories that do not exist yet are silently skipped.
        watched_paths: List[Path] = []
        for agent_id in agent_defaults:
            active_root = self.get_agent_active_root(config_name, agent_id)
            if active_root.exists():
                watched_paths.append(active_root)
            local_root = self.get_agent_local_root(config_name, agent_id)
            if local_root.exists():
                watched_paths.append(local_root)
        handler = _SkillsChangeHandler(watched_paths, self._pending_skill_changes, callback, self._lock)
        observer = Observer()
        for path in watched_paths:
            observer.schedule(handler, str(path), recursive=True)
        observer.start()
        return _SkillsWatcher(observer, handler)
    def reload_skills_if_changed(
        self,
        config_name: str,
        agent_defaults: Dict[str, Iterable[str]],
    ) -> Dict[str, List[Path]]:
        """Check for file changes and reload active skills if needed.

        Args:
            config_name: Run configuration name.
            agent_defaults: Map of agent_id -> default skill names.

        Returns:
            Map of agent_id -> list of reloaded skill paths, or empty dict
            if no changes were detected.
        """
        with self._lock:
            changed = self._pending_skill_changes.get(config_name)
            if not changed:
                return {}
            # Clear the pending markers before re-syncing so events arriving
            # during the reload are kept for the next cycle.
            self._pending_skill_changes[config_name] = set()
            # NOTE(review): prepare_active_skills runs while the lock is held,
            # which serializes reloads but can block the event handler for the
            # duration of the sync — confirm this is intended.
            return self.prepare_active_skills(config_name, agent_defaults)
# -------------------------------------------------------------------------
# Internal change-tracking state (populated by _SkillsChangeHandler)
# -------------------------------------------------------------------------
# Legacy class-level reference kept for migration compatibility
_pending_skill_changes: Dict[str, Set[Path]] = {}
    def _resolve_disabled_skill_names(
        self,
        config_name: str,
        agent_id: str,
        default_skills: Iterable[str],
    ) -> List[str]:
        """Resolve explicit disabled skills for one agent.

        Merges manifest global/agent disables with agent.yaml disables, then
        keeps only names that actually resolve to an existing skill for the
        agent. Order follows the merge order; duplicates may remain and are
        expected to be deduped by the caller.
        """
        bootstrap = get_bootstrap_config_for_run(self.project_root, config_name)
        override = bootstrap.agent_override(agent_id)
        baseline = list(override.get("skills", list(default_skills)))
        agent_config = load_agent_workspace_config(
            self.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
        )
        manifest = self.load_activation_manifest(config_name)
        disabled = list(manifest.get("global_disabled_skills", []))
        disabled.extend(manifest.get("agent_disabled_skills", {}).get(agent_id, []))
        disabled.extend(agent_config.disabled_skills)
        # Baseline and agent-local skills disabled via agent.yaml are appended
        # without duplicating entries already collected above.
        for skill_name in baseline:
            if skill_name in agent_config.disabled_skills and skill_name not in disabled:
                disabled.append(skill_name)
        for item in self.list_agent_local_skills(config_name, agent_id):
            if item.skill_name in agent_config.disabled_skills and item.skill_name not in disabled:
                disabled.append(item.skill_name)
        return [
            skill
            for skill in disabled
            if self._skill_exists_for_agent(config_name, agent_id, skill)
        ]
class _SkillsWatcher:
    """Handle returned by watch_active_skills; call .stop() to halt monitoring."""

    def __init__(self, observer: Observer, handler: "_SkillsChangeHandler") -> None:
        # Keep both references so the watch can be stopped (and the handler
        # stays alive for the observer's lifetime).
        self._handler = handler
        self._observer = observer

    def stop(self) -> None:
        """Stop the file system observer and wait for its thread to finish."""
        self._observer.stop()
        self._observer.join()
class _SkillsChangeHandler(FileSystemEventHandler):
    """Accumulates file-change events observed under watched skill directories."""

    def __init__(
        self,
        watched_paths: List[Path],
        pending_changes: Dict[str, Set[Path]],
        callback: Optional[Any] = None,
        lock: Optional[Lock] = None,
    ) -> None:
        super().__init__()
        self._watched_paths = watched_paths
        self._pending_changes = pending_changes
        self._callback = callback
        self._lock = lock

    def on_any_event(self, event: FileSystemEvent) -> None:
        """Record the changed file under its run id and notify the callback."""
        if event.is_directory:
            return
        changed = Path(event.src_path)
        # Only react to paths beneath one of the watched roots.
        matched_root = next(
            (root for root in self._watched_paths if changed.is_relative_to(root)),
            None,
        )
        if matched_root is None:
            return
        run_id = self._run_id_from_path(changed)
        if self._lock:
            with self._lock:
                self._pending_changes.setdefault(run_id, set()).add(changed)
        else:
            self._pending_changes.setdefault(run_id, set()).add(changed)
        if self._callback:
            self._callback([changed])

    @staticmethod
    def _run_id_from_path(path: Path) -> str:
        """Infer config_name from a path like runs/{config_name}/skills/active/..."""
        parts = path.parts
        # Scan all but the last component so parts[index + 1] always exists.
        for index, part in enumerate(parts[:-1]):
            if part == "runs":
                return parts[index + 1]
        return "default"
def _dedupe_preserve_order(items: Iterable[str]) -> List[str]:
result: List[str] = []
for item in items:
if item not in result:
result.append(item)
return result
def _normalize_skill_name(raw_name: str) -> str:
normalized = str(raw_name or "").strip().lower().replace(" ", "_").replace("-", "_")
allowed = [ch for ch in normalized if ch.isalnum() or ch == "_"]
return "".join(allowed).strip("_")

View File

@@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
"""Team module for multi-agent orchestration.
Provides inter-agent communication, task delegation, and coordination
for subagent spawning and lifecycle management.
"""
from .messenger import AgentMessenger
from .task_delegator import TaskDelegator
from .team_coordinator import TeamCoordinator
from .registry import AgentRegistry
__all__ = [
"AgentMessenger",
"TaskDelegator",
"TeamCoordinator",
"AgentRegistry",
]

View File

@@ -0,0 +1,225 @@
# -*- coding: utf-8 -*-
"""AgentMessenger - Pub/sub inter-agent communication.
Provides broadcast(), send(), and subscribe() for message passing
between agents using AgentScope's Msg format.
"""
from __future__ import annotations
import asyncio
import logging
from typing import Any, Callable, Dict, List, Optional, Set
from agentscope.message import Msg
logger = logging.getLogger(__name__)
class AgentMessenger:
"""Pub/sub messenger for inter-agent communication.
Supports:
- broadcast(): Send message to all subscribers
- send(): Send message to specific agent
- subscribe(): Register callback for agent messages
- announce(): Send system-wide announcement
- enable_auto_broadcast: Auto-broadcast agent replies to all participants
Messages use AgentScope's Msg format for compatibility.
"""
def __init__(self, enable_auto_broadcast: bool = False):
"""Initialize the messenger.
Args:
enable_auto_broadcast: If True, agent replies are automatically
broadcast to all subscribed agents.
"""
self._subscriptions: Dict[str, List[Callable[[Msg], None]]] = {}
self._inbox: Dict[str, List[Msg]] = {}
self._locks: Dict[str, asyncio.Lock] = {}
self._enable_auto_broadcast = enable_auto_broadcast
self._participants: Set[str] = set()
def subscribe(
self,
agent_id: str,
callback: Callable[[Msg], None],
) -> None:
"""Subscribe an agent to receive messages.
Args:
agent_id: Target agent identifier
callback: Async function to call when message received
"""
if agent_id not in self._subscriptions:
self._subscriptions[agent_id] = []
self._subscriptions[agent_id].append(callback)
logger.debug("Agent %s subscribed to messages", agent_id)
def unsubscribe(self, agent_id: str, callback: Callable[[Msg], None]) -> None:
"""Unsubscribe an agent from messages.
Args:
agent_id: Target agent identifier
callback: Callback to remove
"""
if agent_id in self._subscriptions:
try:
self._subscriptions[agent_id].remove(callback)
logger.debug("Agent %s unsubscribed from messages", agent_id)
except ValueError:
pass
async def send(
self,
to_agent: str,
message: Msg,
) -> None:
"""Send message to specific agent.
Args:
to_agent: Target agent identifier
message: Message to send (uses Msg format)
"""
async def _deliver():
if to_agent in self._subscriptions:
for callback in self._subscriptions[to_agent]:
try:
if asyncio.iscoroutinefunction(callback):
await callback(message)
else:
callback(message)
except Exception as e:
logger.error(
"Error delivering message to %s: %s",
to_agent,
e,
)
await _deliver()
async def broadcast(self, message: Msg) -> None:
"""Broadcast message to all subscribed agents.
Args:
message: Message to broadcast (uses Msg format)
"""
delivery_tasks = []
for agent_id, callbacks in self._subscriptions.items():
for callback in callbacks:
async def _deliver(cb=callback, aid=agent_id):
try:
if asyncio.iscoroutinefunction(cb):
await cb(message)
else:
cb(message)
except Exception as e:
logger.error(
"Error broadcasting to %s: %s",
aid,
e,
)
delivery_tasks.append(_deliver())
if delivery_tasks:
await asyncio.gather(*delivery_tasks)
def inbox(self, agent_id: str) -> List[Msg]:
"""Get and clear inbox for agent.
Args:
agent_id: Agent identifier
Returns:
List of messages in inbox
"""
messages = self._inbox.get(agent_id, [])
self._inbox[agent_id] = []
return messages
def inbox_count(self, agent_id: str) -> int:
"""Count messages in agent's inbox without clearing.
Args:
agent_id: Agent identifier
Returns:
Number of messages waiting
"""
return len(self._inbox.get(agent_id, []))
def add_participant(self, agent_id: str) -> None:
"""Add a participant to the messenger.
Participants are the agents that can receive auto-broadcast messages.
Args:
agent_id: Agent identifier to add
"""
self._participants.add(agent_id)
logger.debug("Agent %s added as participant", agent_id)
def remove_participant(self, agent_id: str) -> None:
"""Remove a participant from the messenger.
Args:
agent_id: Agent identifier to remove
"""
self._participants.discard(agent_id)
logger.debug("Agent %s removed from participants", agent_id)
@property
def enable_auto_broadcast(self) -> bool:
"""Check if auto_broadcast is enabled."""
return self._enable_auto_broadcast
@enable_auto_broadcast.setter
def enable_auto_broadcast(self, value: bool) -> None:
"""Enable or disable auto_broadcast."""
self._enable_auto_broadcast = value
logger.debug("Auto_broadcast set to %s", value)
async def announce(self, message: Msg) -> None:
"""Send a system-wide announcement to all participants.
Unlike broadcast(), announce() sends a message from the system/host
to all participants without requiring prior subscription.
Args:
message: Announcement message (uses Msg format)
"""
logger.info("System announcement: %s", message.content)
await self.broadcast(message)
async def auto_broadcast(self, message: Msg) -> None:
"""Auto-broadcast message to all participants.
This is called internally when enable_auto_broadcast is True.
Broadcasts to all registered participants.
Args:
message: Message to auto-broadcast (uses Msg format)
"""
if not self._enable_auto_broadcast:
return
# Broadcast to all participants
for participant_id in self._participants:
if participant_id in self._subscriptions:
for callback in self._subscriptions[participant_id]:
try:
if asyncio.iscoroutinefunction(callback):
await callback(message)
else:
callback(message)
except Exception as e:
logger.error(
"Error auto-broadcasting to %s: %s",
participant_id,
e,
)
__all__ = ["AgentMessenger"]

View File

@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*-
"""AgentRegistry - Agent registration and lookup by role.
Provides register(), unregister(), and get_by_role() for agent
discovery and management.
"""
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from agentscope.message import Msg
logger = logging.getLogger(__name__)
class AgentRegistry:
    """Registry for agent instances with role-based lookup.

    Supports:
    - register(): Add agent with roles
    - unregister(): Remove agent
    - get_by_role(): Find agents by role
    - get_by_id(): Get specific agent

    Each agent can have multiple roles for flexible dispatch.
    """

    def __init__(self):
        # agent_id -> agent instance
        self._agents: Dict[str, Any] = {}
        # role -> list of agent_ids holding that role
        self._roles: Dict[str, List[str]] = {}
        # agent_id -> list of roles (reverse index of _roles)
        self._agent_roles: Dict[str, List[str]] = {}

    def _bind_roles(self, agent_id: str, roles: List[str]) -> None:
        """Add *agent_id* under every role in *roles* (idempotent)."""
        for role in roles:
            members = self._roles.setdefault(role, [])
            if agent_id not in members:
                members.append(agent_id)

    def _unbind_roles(self, agent_id: str, roles: List[str]) -> None:
        """Remove *agent_id* from every role in *roles*; missing entries ignored."""
        for role in roles:
            if role in self._roles:
                try:
                    self._roles[role].remove(agent_id)
                except ValueError:
                    pass

    def register(
        self,
        agent_id: str,
        agent: Any,
        roles: Optional[List[str]] = None,
    ) -> None:
        """Register an agent with optional roles.

        Re-registering an existing id replaces the instance but does NOT
        unbind its previous roles (matching prior behavior).

        Args:
            agent_id: Unique agent identifier
            agent: Agent instance
            roles: Optional list of role strings
        """
        self._agents[agent_id] = agent
        self._agent_roles[agent_id] = roles or []
        self._bind_roles(agent_id, self._agent_roles[agent_id])
        logger.info(
            "Registered agent %s with roles %s",
            agent_id,
            self._agent_roles[agent_id],
        )

    def unregister(self, agent_id: str) -> bool:
        """Unregister an agent.

        Args:
            agent_id: Agent identifier to remove

        Returns:
            True if agent was removed
        """
        if agent_id not in self._agents:
            return False
        self._unbind_roles(agent_id, self._agent_roles.pop(agent_id, []))
        del self._agents[agent_id]
        logger.info("Unregistered agent: %s", agent_id)
        return True

    def get_by_id(self, agent_id: str) -> Optional[Any]:
        """Get agent by ID.

        Args:
            agent_id: Agent identifier

        Returns:
            Agent instance or None
        """
        return self._agents.get(agent_id)

    def get_by_role(self, role: str) -> List[Any]:
        """Get all agents with a given role.

        Args:
            role: Role string to search for

        Returns:
            List of agent instances with the role
        """
        return [
            self._agents[agent_id]
            for agent_id in self._roles.get(role, [])
            if agent_id in self._agents
        ]

    def get_by_roles(self, roles: List[str]) -> List[Any]:
        """Get agents matching ANY of the given roles.

        Args:
            roles: List of role strings

        Returns:
            List of unique agent instances matching any role
        """
        # Dedupe by object identity so unhashable agents are handled too.
        seen = set()
        result = []
        for role in roles:
            for agent in self.get_by_role(role):
                if id(agent) not in seen:
                    seen.add(id(agent))
                    result.append(agent)
        return result

    def list_agents(self) -> List[str]:
        """List all registered agent IDs.

        Returns:
            List of agent identifiers
        """
        return list(self._agents.keys())

    def list_roles(self) -> List[str]:
        """List all registered roles.

        Returns:
            List of role strings
        """
        return list(self._roles.keys())

    def list_roles_for_agent(self, agent_id: str) -> List[str]:
        """List roles for specific agent.

        Args:
            agent_id: Agent identifier

        Returns:
            List of role strings
        """
        return list(self._agent_roles.get(agent_id, []))

    def update_roles(self, agent_id: str, roles: List[str]) -> None:
        """Update roles for an existing agent.

        Args:
            agent_id: Agent identifier
            roles: New list of roles

        Raises:
            KeyError: If the agent is not registered.
        """
        if agent_id not in self._agents:
            raise KeyError(f"Agent not registered: {agent_id}")
        self._unbind_roles(agent_id, self._agent_roles.get(agent_id, []))
        self._agent_roles[agent_id] = roles
        self._bind_roles(agent_id, roles)
        logger.info("Updated roles for agent %s: %s", agent_id, roles)

    @property
    def agents(self) -> Dict[str, Any]:
        """Get copy of registered agents dict."""
        return dict(self._agents)
__all__ = ["AgentRegistry"]

View File

@@ -0,0 +1,620 @@
# -*- coding: utf-8 -*-
"""TaskDelegator - Subagent spawning and task delegation.
Provides delegate() and delegate_parallel() for spawning subagents
with separate context and memory. Supports runtime dynamic subagent
definition via task_data with description, prompt, and tools.
"""
from __future__ import annotations
import asyncio
import logging
import uuid
from typing import Any, Awaitable, Callable, Dict, List, Optional, Union
from agentscope.message import Msg
logger = logging.getLogger(__name__)
# Default timeout for subagent execution (seconds)
DEFAULT_EXECUTION_TIMEOUT = 120.0
# Type alias for subagent specification
SubagentSpec = Dict[str, Any]
"""Subagent specification format:
{
"description": "Expert code reviewer...",
"prompt": "Analyze code quality...",
"tools": ["Read", "Glob", "Grep"], # Optional: list of tool names
"model": "gpt-4o", # Optional: model name
}
"""
class TaskDelegator:
"""Delegates tasks to subagents with isolated context.
Supports:
- delegate(): Spawn single subagent for task
- delegate_parallel(): Spawn multiple subagents concurrently
- delegate_task(): Delegate with dynamic subagent definition from task_data
Each subagent gets its own memory/context to prevent
cross-contamination.
Dynamic Subagent Definition:
task_data can include an "agents" dict to define subagents inline:
task_data = {
"task": "Review the code changes",
"agents": {
"code-reviewer": {
"description": "Expert code reviewer for quality and security.",
"prompt": "Analyze code quality and suggest improvements.",
"tools": ["Read", "Glob", "Grep"],
}
}
}
"""
    def __init__(self, agent: Any):
        """Initialize TaskDelegator.

        Args:
            agent: Parent EvoAgent instance for accessing model, formatter, workspace
        """
        self._agent = agent
        # Optional collaborators inherited from the parent agent, if present.
        # Get messenger from parent agent if available
        self._messenger = getattr(agent, "messenger", None)
        self._registry = getattr(agent, "_registry", None)
        # Book-keeping for spawned subagents and their asyncio tasks.
        self._subagents: Dict[str, Any] = {}
        self._dynamic_subagents: Dict[str, SubagentSpec] = {}
        self._tasks: Dict[str, asyncio.Task] = {}
        # Extract model and formatter from parent agent; all default to None
        # when the parent does not expose the attribute.
        self._model = getattr(agent, "model", None)
        self._formatter = getattr(agent, "formatter", None)
        self._workspace_dir = getattr(agent, "workspace_dir", None)
        self._config_name = getattr(agent, "config_name", None)
async def delegate(
self,
agent_id: str,
task: Callable[..., Awaitable[Msg]],
context: Optional[Dict[str, Any]] = None,
) -> asyncio.Task:
"""Delegate task to a single subagent.
Args:
agent_id: Unique identifier for this subagent instance
task: Async function representing the task
context: Optional context dict for the subagent
Returns:
asyncio.Task for the delegated task
"""
async def _run_with_context():
result = await task(context or {})
return result
self._tasks[agent_id] = asyncio.create_task(_run_with_context())
logger.info("Delegated task to subagent: %s", agent_id)
return self._tasks[agent_id]
async def delegate_parallel(
self,
tasks: List[Dict[str, Any]],
) -> List[asyncio.Task]:
"""Delegate multiple tasks in parallel.
Args:
tasks: List of task dicts with keys:
- agent_id: Unique identifier
- task: Async function to execute
- context: Optional context dict
Returns:
List of asyncio.Task for all delegated tasks
"""
async def _run_task(task_def: Dict[str, Any]):
agent_id = task_def["agent_id"]
task_func = task_def["task"]
context = task_def.get("context", {})
async def _run_with_context():
return await task_func(context)
self._tasks[agent_id] = asyncio.create_task(_run_with_context())
return self._tasks[agent_id]
gathered_tasks = await asyncio.gather(
*[_run_task(t) for t in tasks],
return_exceptions=True,
)
valid_tasks = [t for t in gathered_tasks if isinstance(t, asyncio.Task)]
logger.info(
"Delegated %d tasks in parallel (%d succeeded)",
len(tasks),
len(valid_tasks),
)
return valid_tasks
async def wait_for(self, agent_id: str, timeout: Optional[float] = None) -> Any:
"""Wait for subagent task to complete.
Args:
agent_id: Subagent identifier
timeout: Optional timeout in seconds
Returns:
Task result
Raises:
asyncio.TimeoutError: If task doesn't complete in time
KeyError: If agent_id not found
"""
if agent_id not in self._tasks:
raise KeyError(f"Unknown subagent: {agent_id}")
try:
return await asyncio.wait_for(
self._tasks[agent_id],
timeout=timeout,
)
except asyncio.TimeoutError:
logger.warning("Task %s timed out after %s seconds", agent_id, timeout)
raise
async def cancel(self, agent_id: str) -> bool:
"""Cancel a subagent task.
Args:
agent_id: Subagent identifier
Returns:
True if task was cancelled
"""
if agent_id in self._tasks:
self._tasks[agent_id].cancel()
del self._tasks[agent_id]
logger.info("Cancelled subagent task: %s", agent_id)
return True
return False
def list_tasks(self) -> List[str]:
"""List active subagent task IDs.
Returns:
List of agent_ids with pending tasks
"""
return list(self._tasks.keys())
@property
def tasks(self) -> Dict[str, asyncio.Task]:
"""Get copy of active tasks dict."""
return dict(self._tasks)
    async def delegate_task(
        self,
        task_type: str,
        task_data: Dict[str, Any],
        target_agent: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Delegate a task with optional dynamic subagent definition.

        Supports runtime subagent definition via task_data["agents"]:
            task_data = {
                "task": "Review code changes",
                "agents": {
                    "code-reviewer": {
                        "description": "Expert code reviewer...",
                        "prompt": "Analyze code quality...",
                        "tools": ["Read", "Glob", "Grep"],
                    }
                }
            }

        Args:
            task_type: Type of task (e.g., "analysis", "review", "research")
            task_data: Task payload, may include "agents" for dynamic subagent def
            target_agent: Optional specific agent ID to delegate to

        Returns:
            Dict with "success" and result/error
        """
        try:
            # Extract dynamic subagent definitions from task_data
            agents_def = task_data.get("agents", {})
            if agents_def:
                # Register dynamic subagents for the duration of this call.
                for agent_name, agent_spec in agents_def.items():
                    self._dynamic_subagents[agent_name] = agent_spec
                    logger.info(
                        "Registered dynamic subagent: %s (description: %s)",
                        agent_name,
                        agent_spec.get("description", "")[:50],
                    )
            # Determine target agent
            effective_target = target_agent
            if not effective_target:
                # Use first available dynamic subagent or default
                if agents_def:
                    effective_target = next(iter(agents_def.keys()))
                else:
                    effective_target = "default"
            # Execute the task (async)
            task_result = await self._execute_task(
                task_type=task_type,
                task_data=task_data,
                target_agent=effective_target,
            )
            # Clean up dynamic subagents after execution
            # NOTE(review): cleanup is skipped when _execute_task raises —
            # confirm whether stale _dynamic_subagents entries are acceptable.
            for agent_name in agents_def.keys():
                self._dynamic_subagents.pop(agent_name, None)
            return {
                "success": True,
                "result": task_result,
                "subagents_used": list(agents_def.keys()) if agents_def else [],
            }
        except Exception as e:
            # Delegation failures are reported in-band, never raised.
            logger.error("Task delegation failed: %s", e)
            return {
                "success": False,
                "error": str(e),
            }
    async def _execute_task(
        self,
        task_type: str,
        task_data: Dict[str, Any],
        target_agent: str,
    ) -> Dict[str, Any]:
        """Execute the delegated task with a real subagent.

        Prefers a dynamic subagent spec registered for *target_agent*; falls
        back to running the task directly with the parent agent's model.

        Args:
            task_type: Type of task
            task_data: Task payload ("task" or "prompt" holds the content,
                "timeout" optionally overrides DEFAULT_EXECUTION_TIMEOUT)
            target_agent: Target agent identifier

        Returns:
            Task execution result with success/failure info
        """
        # "task" takes precedence over "prompt"; both absent yields "".
        task_content = task_data.get("task", task_data.get("prompt", ""))
        timeout = task_data.get("timeout", DEFAULT_EXECUTION_TIMEOUT)
        # Check if we have a dynamic subagent spec for this target
        agent_spec = self._dynamic_subagents.get(target_agent)
        if agent_spec:
            logger.info(
                "Executing task '%s' with dynamic subagent '%s'",
                task_type,
                target_agent,
            )
            return await self._create_and_run_subagent(
                agent_name=target_agent,
                agent_spec=agent_spec,
                task_content=task_content,
                task_type=task_type,
                timeout=timeout,
            )
        # Fallback: try to use parent agent's model to process the task directly
        logger.info(
            "Executing task '%s' with parent agent '%s' (no dynamic subagent)",
            task_type,
            target_agent,
        )
        return await self._run_with_parent_agent(
            task_content=task_content,
            task_type=task_type,
            timeout=timeout,
        )
async def _create_and_run_subagent(
    self,
    agent_name: str,
    agent_spec: SubagentSpec,
    task_content: str,
    task_type: str,
    timeout: float,
) -> Dict[str, Any]:
    """Create and run a dynamic subagent.

    Args:
        agent_name: Name identifier for the subagent
        agent_spec: Subagent specification (description, prompt, tools, model)
        task_content: Task prompt to send to the subagent
        task_type: Type of task
        timeout: Execution timeout in seconds

    Returns:
        Dict with execution results
    """
    # Unique per-run id so concurrent delegations never collide.
    subagent_id = f"subagent_{agent_name}_{uuid.uuid4().hex[:8]}"
    try:
        # Create subagent instance
        subagent = await self._create_subagent(
            subagent_id=subagent_id,
            agent_spec=agent_spec,
        )
        if subagent is None:
            return {
                "task_type": task_type,
                "task": task_content,
                "subagent": agent_name,
                "status": "failed",
                "error": "Failed to create subagent",
                "message": f"Could not instantiate subagent '{agent_name}'",
            }
        # Track the running subagent so it can be inspected while executing.
        self._subagents[subagent_id] = subagent
        # Execute with timeout; asyncio.wait_for cancels the coroutine on expiry.
        result = await asyncio.wait_for(
            self._run_subagent(subagent, task_content),
            timeout=timeout,
        )
        # Normalize the reply into plain text across the possible result shapes.
        response_content = ""
        if isinstance(result, Msg):
            response_content = result.content
        elif hasattr(result, "content"):
            response_content = str(result.content)
        elif isinstance(result, dict):
            response_content = result.get("content", str(result))
        else:
            response_content = str(result)
        logger.info(
            "Subagent '%s' completed task '%s' successfully",
            agent_name,
            task_type,
        )
        return {
            "task_type": task_type,
            "task": task_content,
            "subagent": {
                "name": agent_name,
                "id": subagent_id,
                "description": agent_spec.get("description", ""),
            },
            "status": "completed",
            "response": response_content,
            "message": f"Task '{task_type}' executed with subagent '{agent_name}'",
        }
    except asyncio.TimeoutError:
        logger.warning(
            "Subagent '%s' timed out after %.1f seconds for task '%s'",
            agent_name,
            timeout,
            task_type,
        )
        return {
            "task_type": task_type,
            "task": task_content,
            "subagent": agent_name,
            "status": "timeout",
            "error": f"Execution timed out after {timeout} seconds",
            "message": f"Task '{task_type}' timed out for subagent '{agent_name}'",
        }
    except Exception as e:
        logger.error(
            "Subagent '%s' failed for task '%s': %s",
            agent_name,
            task_type,
            e,
            exc_info=True,
        )
        return {
            "task_type": task_type,
            "task": task_content,
            "subagent": agent_name,
            "status": "error",
            "error": str(e),
            "message": f"Task '{task_type}' failed for subagent '{agent_name}': {e}",
        }
    finally:
        # BUG FIX: previously only the timeout/error paths removed the entry,
        # so every successful run leaked a subagent in self._subagents.
        # The subagent is transient — always drop it once execution finishes.
        self._subagents.pop(subagent_id, None)
async def _create_subagent(
    self,
    subagent_id: str,
    agent_spec: SubagentSpec,
) -> Optional[Any]:
    """Create a subagent instance.

    Uses the parent agent's model/formatter to create a lightweight
    subagent for task execution.

    Args:
        subagent_id: Unique identifier for the subagent
        agent_spec: Subagent specification

    Returns:
        Subagent instance or None if creation fails
    """
    try:
        # Import here to avoid circular imports
        from agentscope.memory import InMemoryMemory
        # Get model and formatter from parent; the subagent shares the
        # parent's LLM backend rather than creating its own.
        model = self._model
        formatter = self._formatter
        if model is None:
            # Without a model there is nothing to drive the subagent.
            logger.error("Cannot create subagent: parent agent has no model")
            return None
        # Build system prompt from agent spec
        description = agent_spec.get("description", "")
        prompt_template = agent_spec.get("prompt", "")
        system_prompt = f"""You are {description}
{prompt_template}
Your task is to complete the user's request below.
"""
        # Create a minimal ReActAgent as the subagent
        from agentscope.agent import ReActAgent
        subagent = ReActAgent(
            name=subagent_id,
            model=model,
            sys_prompt=system_prompt,
            toolkit=None,  # Could load tools from agent_spec.get("tools", [])
            memory=InMemoryMemory(),  # fresh, isolated conversation memory
            formatter=formatter,
            max_iters=agent_spec.get("max_iters", 5),  # cap on ReAct iterations
        )
        logger.debug("Created subagent: %s", subagent_id)
        return subagent
    except Exception as e:
        # Creation failures are reported to the caller as None; the caller
        # turns that into a structured "failed" result.
        logger.error(
            "Failed to create subagent '%s': %s",
            subagent_id,
            e,
            exc_info=True,
        )
        return None
async def _run_subagent(
    self,
    subagent: Any,
    task_content: str,
) -> Any:
    """Send a task prompt to a subagent and await its reply.

    Args:
        subagent: Subagent instance
        task_content: Task prompt

    Returns:
        Agent response (Msg or similar)
    """
    from agentscope.message import Msg

    # Wrap the raw prompt as a user message and let the agent respond.
    return await subagent.reply(
        Msg(
            name="user",
            content=task_content,
            role="user",
        )
    )
async def _run_with_parent_agent(
    self,
    task_content: str,
    task_type: str,
    timeout: float,
) -> Dict[str, Any]:
    """Run task using the parent agent directly.

    Used when no dynamic subagent is defined for the target.

    Args:
        task_content: Task prompt
        task_type: Type of task
        timeout: Execution timeout

    Returns:
        Dict with execution results
    """
    try:
        reply = await asyncio.wait_for(
            self._agent.reply(
                Msg(
                    name="user",
                    content=task_content,
                    role="user",
                )
            ),
            timeout=timeout,
        )
        # Normalize the reply into plain text.
        if isinstance(reply, Msg):
            content = reply.content
        elif hasattr(reply, "content"):
            content = str(reply.content)
        else:
            content = str(reply)
        return {
            "task_type": task_type,
            "task": task_content,
            "status": "completed",
            "response": content,
            "message": f"Task '{task_type}' executed with parent agent",
        }
    except asyncio.TimeoutError:
        return {
            "task_type": task_type,
            "task": task_content,
            "status": "timeout",
            "error": f"Execution timed out after {timeout} seconds",
            "message": f"Task '{task_type}' timed out",
        }
    except Exception as e:
        logger.error(
            "Parent agent failed for task '%s': %s",
            task_type,
            e,
            exc_info=True,
        )
        return {
            "task_type": task_type,
            "task": task_content,
            "status": "error",
            "error": str(e),
            "message": f"Task '{task_type}' failed: {e}",
        }
def get_dynamic_subagent(self, name: str) -> Optional[SubagentSpec]:
    """Look up a dynamically defined subagent specification.

    Args:
        name: Subagent name

    Returns:
        Subagent spec dict or None if not found
    """
    spec = self._dynamic_subagents.get(name)
    return spec
def list_dynamic_subagents(self) -> List[str]:
    """List all registered dynamic subagent names.

    Returns:
        List of subagent names, in registration order.
    """
    return [name for name in self._dynamic_subagents]
# Public names exported by this module.
__all__ = ["TaskDelegator", "SubagentSpec"]

View File

@@ -0,0 +1,389 @@
# -*- coding: utf-8 -*-
"""TeamCoordinator - Agent lifecycle management and execution.
Provides run_parallel() using asyncio.gather() and run_sequential()
for coordinating multiple agents.
"""
from __future__ import annotations
import asyncio
import logging
from typing import Any, Awaitable, Callable, Dict, List, Optional, Type
from agentscope.message import Msg
logger = logging.getLogger(__name__)
class TeamCoordinator:
    """Coordinates agent lifecycle and execution.

    Supports:
    - run_parallel(): Execute multiple agents concurrently with asyncio.gather()
    - run_sequential(): Execute agents one after another
    - run_phase(): Execute a named phase with registered agents
    - register_agent(): Add agent to coordinator
    - unregister_agent(): Remove agent from coordinator

    Each agent maintains separate context/memory.
    """

    def __init__(
        self,
        participants: Optional[List[Any]] = None,
        task_content: Optional[str] = None,
        messenger: Optional[Any] = None,
        registry: Optional[Any] = None,
    ):
        """Initialize TeamCoordinator.

        Args:
            participants: List of agent instances to coordinate
            task_content: Task description content for the agents
            messenger: AgentMessenger for communication (optional)
            registry: AgentRegistry for agent lookup (optional)
        """
        self._participants = participants or []
        self._task_content = task_content or ""
        self._messenger = messenger
        self._registry = registry
        # Registry of coordinated agents: agent_id -> agent instance.
        self._agents: Dict[str, Any] = {}
        # In-flight asyncio tasks, drained by shutdown().
        self._running_tasks: Dict[str, asyncio.Task] = {}
        # Auto-register participants
        # NOTE(review): a participant with neither a "name" nor an "id"
        # attribute is silently skipped here — confirm that is intended.
        for agent in self._participants:
            if hasattr(agent, "name"):
                self._agents[agent.name] = agent
            elif hasattr(agent, "id"):
                self._agents[agent.id] = agent

    def register_agent(self, agent_id: str, agent: Any) -> None:
        """Register an agent with the coordinator.

        Args:
            agent_id: Unique agent identifier
            agent: Agent instance
        """
        # Overwrites any existing agent registered under the same id.
        self._agents[agent_id] = agent
        logger.info("Registered agent: %s", agent_id)

    def unregister_agent(self, agent_id: str) -> None:
        """Unregister an agent from the coordinator.

        Args:
            agent_id: Agent identifier to remove
        """
        if agent_id in self._agents:
            del self._agents[agent_id]
            logger.info("Unregistered agent: %s", agent_id)

    def get_agent(self, agent_id: str) -> Any:
        """Get registered agent by ID.

        Args:
            agent_id: Agent identifier

        Returns:
            Agent instance (None when the id is unknown)
        """
        return self._agents.get(agent_id)

    def list_agents(self) -> List[str]:
        """List all registered agent IDs.

        Returns:
            List of agent identifiers
        """
        return list(self._agents.keys())

    async def run_parallel(
        self,
        agent_ids: List[str],
        initial_message: Optional[Msg] = None,
    ) -> Dict[str, Any]:
        """Run multiple agents in parallel using asyncio.gather().

        Args:
            agent_ids: List of agent IDs to run concurrently
            initial_message: Optional initial message to broadcast

        Returns:
            Dict mapping agent_id to result
        """

        async def _run_agent(aid: str) -> tuple[str, Any]:
            # Run one agent and pair its result with its id so the
            # gather() output can be turned back into a dict.
            agent = self._agents.get(aid)
            if agent is None:
                logger.error("Agent %s not found", aid)
                return (aid, None)
            try:
                # Prefer the async reply() protocol, then run(), then a
                # plain awaitable callable.
                if hasattr(agent, "reply") and asyncio.iscoroutinefunction(agent.reply):
                    if initial_message:
                        result = await agent.reply(initial_message)
                    else:
                        result = await agent.reply()
                elif hasattr(agent, "run") and asyncio.iscoroutinefunction(agent.run):
                    result = await agent.run()
                else:
                    result = await agent()
                logger.info("Agent %s completed successfully", aid)
                return (aid, result)
            except Exception as e:
                # Per-agent failures are captured as {"error": ...} so one
                # crash does not abort the whole parallel batch.
                logger.error("Agent %s failed: %s", aid, e)
                return (aid, {"error": str(e)})

        results = await asyncio.gather(
            *[_run_agent(aid) for aid in agent_ids],
            return_exceptions=True,
        )
        output: Dict[str, Any] = {}
        for result in results:
            if isinstance(result, tuple):
                agent_id, agent_result = result
                output[agent_id] = agent_result
            else:
                # _run_agent catches its own exceptions, so this branch is
                # only reachable for unexpected gather-level failures.
                logger.error("Unexpected result from asyncio.gather: %s", result)
        logger.info("Parallel run completed for %d agents", len(agent_ids))
        return output

    async def run_sequential(
        self,
        agent_ids: List[str],
        initial_message: Optional[Msg] = None,
    ) -> Dict[str, Any]:
        """Run agents one after another in order.

        Each agent's result is fed to the next agent as its input message;
        execution stops at the first failure.

        Args:
            agent_ids: List of agent IDs to run in sequence
            initial_message: Optional initial message for first agent

        Returns:
            Dict mapping agent_id to result
        """
        output: Dict[str, Any] = {}
        current_message = initial_message
        for agent_id in agent_ids:
            agent = self._agents.get(agent_id)
            if agent is None:
                logger.error("Agent %s not found", agent_id)
                output[agent_id] = {"error": "Agent not found"}
                continue
            try:
                if hasattr(agent, "reply") and asyncio.iscoroutinefunction(agent.reply):
                    result = await agent.reply(current_message)
                elif hasattr(agent, "run") and asyncio.iscoroutinefunction(agent.run):
                    result = await agent.run()
                else:
                    result = await agent()
                output[agent_id] = result
                # Chain this agent's result into the next agent's input.
                current_message = result
                logger.info("Agent %s completed sequentially", agent_id)
            except Exception as e:
                logger.error("Agent %s failed: %s", agent_id, e)
                output[agent_id] = {"error": str(e)}
                # Abort the rest of the chain on failure.
                break
        logger.info("Sequential run completed for %d agents", len(agent_ids))
        return output

    async def run_phase(
        self,
        phase_name: str,
        agent_ids: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> List[Any]:
        """Execute a named phase with registered agents.

        Args:
            phase_name: Name of the phase (e.g., "analyst_analysis")
            agent_ids: Optional list of agent IDs; if None, uses all registered
            metadata: Optional metadata to include in the message (e.g., tickers, date)

        Returns:
            List of results from each agent (None entries for agents that
            failed), in the same order as the resolved agent ids.
        """
        if agent_ids is None:
            agent_ids = list(self._agents.keys())
        # Drop ids that are not actually registered.
        _agent_ids = [aid for aid in agent_ids if aid in self._agents]
        logger.info(
            "Running phase '%s' with %d agents: %s",
            phase_name,
            len(_agent_ids),
            _agent_ids,
        )
        # Create messages for each agent
        # NOTE(review): agents run one at a time here, not concurrently.
        results: List[Any] = []
        for agent_id in _agent_ids:
            agent = self._agents[agent_id]
            try:
                if hasattr(agent, "reply") and asyncio.iscoroutinefunction(agent.reply):
                    # Create a message for the agent with proper structure
                    msg = Msg(
                        name="system",
                        content=self._task_content or f"Please execute phase: {phase_name}",
                        role="user",
                        metadata=metadata,
                    )
                    result = await agent.reply(msg)
                elif hasattr(agent, "run") and asyncio.iscoroutinefunction(agent.run):
                    result = await agent.run()
                else:
                    result = await agent()
                results.append(result)
                logger.info("Phase '%s': Agent %s completed", phase_name, agent_id)
            except Exception as e:
                logger.error("Phase '%s': Agent %s failed: %s", phase_name, agent_id, e)
                # Keep positional alignment with _agent_ids by appending None.
                results.append(None)
        logger.info("Phase '%s' completed with %d results", phase_name, len(results))
        return results

    async def run_with_dependencies(
        self,
        agent_tasks: Dict[str, List[str]],
        initial_message: Optional[Msg] = None,
    ) -> Dict[str, Any]:
        """Run agents respecting dependency graph.

        Agents whose prerequisites have all completed run together in
        parallel waves until every agent has executed.

        Args:
            agent_tasks: Dict mapping agent_id to list of prerequisite agent_ids
            initial_message: Optional initial message

        Returns:
            Dict mapping agent_id to result
        """
        completed: Dict[str, Any] = {}
        remaining = set(agent_tasks.keys())
        while remaining:
            # An agent is ready once all of its prerequisites are complete.
            ready = [
                aid for aid in remaining
                if all(dep in completed for dep in agent_tasks.get(aid, []))
            ]
            if not ready:
                # No progress possible: the remaining agents form a cycle.
                logger.error("Circular dependency detected in agent tasks")
                for aid in remaining:
                    completed[aid] = {"error": "Circular dependency"}
                break
            results = await self.run_parallel(ready, initial_message)
            completed.update(results)
            for aid in ready:
                remaining.discard(aid)
                # NOTE(review): this feeds the *last* ready agent's result
                # into the next wave's initial message (iteration-order
                # dependent) — confirm this is the intended hand-off.
                initial_message = results.get(aid)
        return completed

    async def fanout_pipeline(
        self,
        agents: List[Any],
        msg: Optional[Msg] = None,
    ) -> List[Msg]:
        """Fanout a message to multiple agents concurrently and collect all responses.

        Similar to AgentScope's fanout_pipeline, this sends the same message
        to all specified agents and returns a list of all agent responses.

        Args:
            agents: List of agent instances to fanout the message to
            msg: Message to send to all agents (optional)

        Returns:
            List of Msg responses from each agent (in the same order as the
            input agents); entries are None for agents that failed.

        Example:
            >>> responses = await fanout_pipeline(
            ...     agents=[alice, bob, charlie],
            ...     msg=question,
            ... )
            >>> # responses is a list of Msg responses from each agent
        """

        async def _fanout_to_agent(agent: Any) -> Optional[Msg]:
            """Send message to a single agent and return its response."""
            try:
                if hasattr(agent, "reply") and asyncio.iscoroutinefunction(agent.reply):
                    result = await agent.reply(msg) if msg is not None else await agent.reply()
                elif hasattr(agent, "run") and asyncio.iscoroutinefunction(agent.run):
                    result = await agent.run()
                else:
                    result = await agent()
                # Convert result to Msg if needed
                if result is None:
                    return None
                if isinstance(result, Msg):
                    return result
                # If result is a dict with content, wrap it
                if isinstance(result, dict) and "content" in result:
                    return Msg(
                        name=getattr(agent, "name", "unknown"),
                        content=result.get("content", ""),
                        role="assistant",
                        metadata=result.get("metadata"),
                    )
                # Otherwise wrap the result
                return Msg(
                    name=getattr(agent, "name", "unknown"),
                    content=str(result),
                    role="assistant",
                )
            except Exception as e:
                logger.error("Agent %s failed in fanout_pipeline: %s",
                             getattr(agent, "name", "unknown"), e)
                return None

        # Run all agents concurrently
        results = await asyncio.gather(
            *[_fanout_to_agent(agent) for agent in agents],
            return_exceptions=True,
        )
        # Filter out exceptions and keep only valid responses
        responses: List[Msg] = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error("Fanout to agent %d failed: %s", i, result)
                # Preserve positional alignment with the input agents list.
                responses.append(None)  # type: ignore[arg-type]
            else:
                responses.append(result)  # type: ignore[arg-type]
        logger.info("Fanout pipeline completed for %d agents", len(agents))
        return responses

    async def shutdown(self, timeout: Optional[float] = 5.0) -> None:
        """Shutdown all running agents gracefully.

        Args:
            timeout: Timeout for graceful shutdown
        """
        logger.info("Shutting down TeamCoordinator...")
        # Wrap each tracked task in wait_for so a hung task cannot block
        # shutdown past the timeout.
        cancel_tasks = [
            asyncio.create_task(asyncio.wait_for(task, timeout=timeout))
            for task in self._running_tasks.values()
        ]
        if cancel_tasks:
            await asyncio.gather(*cancel_tasks, return_exceptions=True)
        self._running_tasks.clear()
        logger.info("TeamCoordinator shutdown complete")

    @property
    def agents(self) -> Dict[str, Any]:
        """Get copy of registered agents dict."""
        return dict(self._agents)
# Public names exported by this module.
__all__ = ["TeamCoordinator"]

View File

@@ -0,0 +1,132 @@
# -*- coding: utf-8 -*-
"""Run-scoped team pipeline configuration helpers."""
from __future__ import annotations
from pathlib import Path
from typing import Iterable, List, Dict, Any
import yaml
# File name of the per-run team pipeline configuration.
DEFAULT_FILENAME = "TEAM_PIPELINE.yaml"


def team_pipeline_path(project_root: Path, config_name: str) -> Path:
    """Return run-scoped team pipeline config path."""
    return project_root.joinpath("runs", config_name, DEFAULT_FILENAME)
def ensure_team_pipeline_config(
    project_root: Path,
    config_name: str,
    default_analysts: Iterable[str],
) -> Path:
    """Ensure TEAM_PIPELINE.yaml exists for one run.

    Creates the file with default content when missing; an existing file is
    left untouched. Returns the config path either way.
    """
    config_path = team_pipeline_path(project_root, config_name)
    config_path.parent.mkdir(parents=True, exist_ok=True)
    if config_path.exists():
        # Never overwrite an existing run-scoped config.
        return config_path
    default_payload = {
        "version": 1,
        "controller_agent": "portfolio_manager",
        "discussion": {
            "allow_dynamic_team_update": True,
            "active_analysts": list(default_analysts),
        },
        "decision": {
            "require_risk_manager": True,
        },
    }
    config_path.write_text(
        yaml.safe_dump(default_payload, allow_unicode=True, sort_keys=False),
        encoding="utf-8",
    )
    return config_path
def load_team_pipeline_config(project_root: Path, config_name: str) -> Dict[str, Any]:
    """Load TEAM_PIPELINE.yaml and return parsed dict.

    Returns an empty dict when the file is missing, empty, or does not
    contain a YAML mapping at the top level.
    """
    config_path = team_pipeline_path(project_root, config_name)
    if not config_path.exists():
        return {}
    loaded = yaml.safe_load(config_path.read_text(encoding="utf-8"))
    if isinstance(loaded, dict):
        return loaded
    # Empty file or non-mapping YAML: treat as "no config".
    return {}
def save_team_pipeline_config(
    project_root: Path,
    config_name: str,
    config: Dict[str, Any],
) -> Path:
    """Persist TEAM_PIPELINE.yaml.

    Serializes ``config`` to the run-scoped path, creating parent
    directories as needed, and returns that path.
    """
    target = team_pipeline_path(project_root, config_name)
    target.parent.mkdir(parents=True, exist_ok=True)
    serialized = yaml.safe_dump(config, allow_unicode=True, sort_keys=False)
    target.write_text(serialized, encoding="utf-8")
    return target
def resolve_active_analysts(
    project_root: Path,
    config_name: str,
    available_analysts: Iterable[str],
) -> List[str]:
    """Resolve active analysts from TEAM_PIPELINE.yaml.

    Falls back to the full available list when the config is missing,
    malformed, or selects no known analyst.

    Args:
        project_root: Project root directory.
        config_name: Run configuration name.
        available_analysts: All analyst ids that exist for this run.

    Returns:
        The configured active analysts filtered to known ids, or all
        available analysts as a fallback.
    """
    available = [item for item in available_analysts]
    parsed = load_team_pipeline_config(project_root, config_name)
    discussion = parsed.get("discussion", {}) if isinstance(parsed, dict) else {}
    if not isinstance(discussion, dict):
        # BUG FIX: a non-mapping "discussion" entry (e.g. a YAML list or
        # string) previously raised AttributeError on .get(); treat such
        # malformed config as "not configured" instead.
        discussion = {}
    configured = discussion.get("active_analysts", [])
    if not isinstance(configured, list) or not configured:
        return available
    # Ignore configured names that are no longer available.
    active = [item for item in configured if item in available]
    return active or available
def update_active_analysts(
    project_root: Path,
    config_name: str,
    available_analysts: Iterable[str],
    *,
    add: Iterable[str] | None = None,
    remove: Iterable[str] | None = None,
    set_to: Iterable[str] | None = None,
) -> List[str]:
    """Update active analysts and persist TEAM_PIPELINE.yaml.

    Precedence: ``set_to`` replaces the list first, then ``add`` appends,
    then ``remove`` filters. Names not present in ``available_analysts``
    are ignored for ``set_to``/``add``.

    Returns:
        The final persisted list of active analysts.
    """
    available = [item for item in available_analysts]
    # Make sure the config file exists before reading and patching it.
    ensure_team_pipeline_config(project_root, config_name, available)
    parsed = load_team_pipeline_config(project_root, config_name)
    discussion = parsed.setdefault("discussion", {})
    if not isinstance(discussion, dict):
        # A malformed "discussion" entry is replaced with a fresh mapping.
        discussion = {}
        parsed["discussion"] = discussion
    current = discussion.get("active_analysts", [])
    if not isinstance(current, list):
        current = []
    # Drop stale names that are no longer available.
    current = [item for item in current if item in available]
    if not current:
        current = list(available)
    if set_to is not None:
        # An empty (or all-unknown) set_to keeps the previous list.
        target = [item for item in set_to if item in available]
        current = target or current
    for item in add or []:
        if item in available and item not in current:
            current.append(item)
    for item in remove or []:
        current = [existing for existing in current if existing != item]
    if not current:
        # Never persist an empty team; fall back to the first available analyst.
        current = [available[0]] if available else []
    discussion["active_analysts"] = current
    save_team_pipeline_config(project_root, config_name, parsed)
    return current

View File

@@ -0,0 +1,517 @@
# -*- coding: utf-8 -*-
"""Toolkit factory following AgentScope's skill + tool group practices.
支持从Agent工作空间动态创建工具集加载builtin/customized技能
以及合并Agent特定工具。
"""
from typing import Any, Dict, Iterable, List, Optional, Set
from pathlib import Path
import yaml
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skills_manager import SkillsManager
from backend.agents.skill_loader import load_skill_from_dir, get_skill_tools
from backend.agents.skill_metadata import parse_skill_metadata
from backend.config.bootstrap_config import get_bootstrap_config_for_run
def load_agent_profiles() -> Dict[str, Dict[str, Any]]:
    """Load per-agent profile definitions from agent_profiles.yaml."""
    profiles_file = (
        SkillsManager().project_root / "backend" / "config" / "agent_profiles.yaml"
    )
    with open(profiles_file, "r", encoding="utf-8") as handle:
        parsed = yaml.safe_load(handle)
    # An empty YAML file parses to None; normalize to an empty mapping.
    return parsed or {}
def _register_analysis_tool_groups(toolkit: Any) -> None:
    """Register the analyst-facing tool groups on the toolkit.

    Creates four inactive groups (fundamentals, technical, sentiment,
    valuation) and registers each listed tool from TOOL_REGISTRY under its
    group. Tool names missing from TOOL_REGISTRY are silently skipped.
    """
    from backend.tools.analysis_tools import TOOL_REGISTRY
    # Declarative group definitions. description/notes feed the toolkit's
    # prompt rendering; every group starts inactive and is switched on
    # later via update_tool_groups().
    tool_groups = {
        "fundamentals": {
            "description": "Financial health, profitability, growth, and efficiency analysis tools.",
            "active": False,
            "notes": (
                "Use these tools to validate business quality, financial resilience, "
                "and earnings durability before making directional conclusions."
            ),
            "tools": [
                "analyze_profitability",
                "analyze_growth",
                "analyze_financial_health",
                "analyze_efficiency_ratios",
                "analyze_valuation_ratios",
                "get_financial_metrics_tool",
            ],
        },
        "technical": {
            "description": "Trend, momentum, mean reversion, and volatility analysis tools.",
            "active": False,
            "notes": (
                "Use these tools to assess timing, price structure, and risk-reward in "
                "the current market regime."
            ),
            "tools": [
                "analyze_trend_following",
                "analyze_momentum",
                "analyze_mean_reversion",
                "analyze_volatility",
            ],
        },
        "sentiment": {
            "description": "News sentiment and insider activity analysis tools.",
            "active": False,
            "notes": (
                "Use these tools to capture short-horizon catalysts, sentiment shifts, "
                "and behavioral signals around each ticker."
            ),
            "tools": [
                "analyze_news_sentiment",
                "analyze_insider_trading",
            ],
        },
        "valuation": {
            "description": "Intrinsic value and relative valuation analysis tools.",
            "active": False,
            "notes": (
                "Use these tools when the task requires fair value estimation, margin of "
                "safety analysis, or valuation scenario comparison."
            ),
            "tools": [
                "dcf_valuation_analysis",
                "owner_earnings_valuation_analysis",
                "ev_ebitda_valuation_analysis",
                "residual_income_valuation_analysis",
            ],
        },
    }
    for group_name, group_config in tool_groups.items():
        toolkit.create_tool_group(
            group_name=group_name,
            description=group_config["description"],
            active=group_config["active"],
            notes=group_config["notes"],
        )
        for tool_name in group_config["tools"]:
            # Resolve the callable from the registry; unknown names are skipped.
            tool_func = TOOL_REGISTRY.get(tool_name)
            if tool_func:
                toolkit.register_tool_function(
                    tool_func,
                    group_name=group_name,
                )
def _register_portfolio_tool_groups(toolkit: Any, pm_agent: Any) -> None:
"""注册投资组合工具组"""
toolkit.create_tool_group(
group_name="portfolio_ops",
description="Portfolio decision recording tools.",
active=False,
notes=(
"Use portfolio tools only after synthesizing analyst and risk inputs. "
"Record one explicit decision per ticker."
),
)
toolkit.register_tool_function(
pm_agent._make_decision,
group_name="portfolio_ops",
)
if hasattr(pm_agent, "_add_team_analyst"):
toolkit.register_tool_function(
pm_agent._add_team_analyst,
group_name="portfolio_ops",
)
if hasattr(pm_agent, "_remove_team_analyst"):
toolkit.register_tool_function(
pm_agent._remove_team_analyst,
group_name="portfolio_ops",
)
if hasattr(pm_agent, "_set_active_analysts"):
toolkit.register_tool_function(
pm_agent._set_active_analysts,
group_name="portfolio_ops",
)
if hasattr(pm_agent, "_create_team_analyst"):
toolkit.register_tool_function(
pm_agent._create_team_analyst,
group_name="portfolio_ops",
)
def _register_risk_tool_groups(toolkit: Any) -> None:
    """Register risk-diagnostic tools under the 'risk_ops' tool group."""
    from backend.tools.risk_tools import (
        assess_margin_and_liquidity,
        assess_position_concentration,
        assess_volatility_exposure,
    )

    toolkit.create_tool_group(
        group_name="risk_ops",
        description="Risk diagnostics for concentration, leverage, and volatility.",
        active=False,
        notes=(
            "Use risk tools to quantify concentration, margin pressure, and volatility "
            "before writing the final risk memo."
        ),
    )
    # Register the three diagnostics in a fixed, deterministic order.
    for risk_tool in (
        assess_position_concentration,
        assess_margin_and_liquidity,
        assess_volatility_exposure,
    ):
        toolkit.register_tool_function(risk_tool, group_name="risk_ops")
def create_agent_toolkit(
    agent_id: str,
    config_name: str,
    owner: Any = None,
    active_skill_dirs: Iterable[str] | None = None,
) -> Any:
    """Create a Toolkit with agent skills and grouped tools.

    Args:
        agent_id: Agent identifier.
        config_name: Run configuration name.
        owner: Agent instance, used to register agent-specific methods.
        active_skill_dirs: Explicit list of active skill directories; when
            None, skills are resolved from the agent's workspace config.

    Returns:
        A configured Toolkit instance.
    """
    from agentscope.tool import Toolkit
    profiles = load_agent_profiles()
    profile = profiles.get(agent_id, {})
    skills_manager = SkillsManager()
    agent_config = load_agent_workspace_config(
        skills_manager.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
    )
    bootstrap_config = get_bootstrap_config_for_run(
        skills_manager.project_root,
        config_name,
    )
    override = bootstrap_config.agent_override(agent_id)
    # Precedence for active groups: run override > workspace config > profile.
    active_groups = override.get(
        "active_tool_groups",
        agent_config.active_tool_groups
        or profile.get("active_tool_groups", []),
    )
    # Disabled groups always win over any activation source.
    disabled_groups = set(agent_config.disabled_tool_groups)
    if disabled_groups:
        active_groups = [
            group_name
            for group_name in active_groups
            if group_name not in disabled_groups
        ]
    toolkit = Toolkit(
        agent_skill_instruction=(
            "<system-info>You have access to project skills. Each skill lives in a "
            "directory and is described by SKILL.md. Follow the skill instructions "
            "when they are relevant to the current task.</system-info>"
        ),
        agent_skill_template="- {name} (dir: {dir}): {description}",
    )
    # Role-specific tool groups, selected by agent-id convention.
    if agent_id.endswith("_analyst"):
        _register_analysis_tool_groups(toolkit)
    elif agent_id == "portfolio_manager" and owner is not None:
        _register_portfolio_tool_groups(toolkit, owner)
    elif agent_id == "risk_manager":
        _register_risk_tool_groups(toolkit)
    if active_skill_dirs is None:
        # Resolve skill names from the workspace, then map them onto the
        # agent's active skill root.
        skill_names = skills_manager.resolve_agent_skill_names(
            config_name=config_name,
            agent_id=agent_id,
            default_skills=profile.get("skills", []),
        )
        active_skill_dirs = [
            skills_manager.get_agent_active_root(config_name, agent_id) / skill_name
            for skill_name in skill_names
        ]
    for skill_dir in active_skill_dirs:
        toolkit.register_agent_skill(str(skill_dir))
    # Annotate registered skills with allowed/denied tool restrictions.
    apply_skill_tool_restrictions(toolkit, active_skill_dirs)
    if active_groups:
        toolkit.update_tool_groups(group_names=active_groups, active=True)
    return toolkit
def create_toolkit_from_workspace(
    agent_id: str,
    config_name: str,
    owner: Any = None,
    include_builtin: bool = True,
    include_customized: bool = True,
    include_local: bool = True,
    active_groups: Optional[List[str]] = None,
) -> Any:
    """Create a toolkit from the agent workspace.

    An enhanced variant of create_agent_toolkit that supports a more
    flexible skill-loading strategy.

    Args:
        agent_id: Agent identifier.
        config_name: Run configuration name.
        owner: Agent instance.
        include_builtin: Whether to include builtin skills.
        include_customized: Whether to include customized skills.
        include_local: Whether to include agent-local skills.
        active_groups: Explicit list of tool groups to activate.

    Returns:
        A configured Toolkit instance.
    """
    from agentscope.tool import Toolkit
    # NOTE(review): include_builtin / include_customized are accepted but not
    # consulted below — only include_local gates a loading step. Confirm intent.
    skills_manager = SkillsManager()
    agent_config = load_agent_workspace_config(
        skills_manager.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
    )
    toolkit = Toolkit(
        agent_skill_instruction=(
            "<system-info>You have access to project skills. Each skill lives in a "
            "directory and is described by SKILL.md. Follow the skill instructions "
            "when they are relevant to the current task.</system-info>"
        ),
        agent_skill_template="- {name} (dir: {dir}): {description}",
    )
    # Register default tool groups by agent type.
    if agent_id.endswith("_analyst"):
        _register_analysis_tool_groups(toolkit)
    elif agent_id == "portfolio_manager" and owner is not None:
        _register_portfolio_tool_groups(toolkit, owner)
    elif agent_id == "risk_manager":
        _register_risk_tool_groups(toolkit)
    # Collect all skill directories to load (deduplicated, in priority order).
    skill_dirs: List[Path] = []
    # 1. Load already-synced skills from the active directory.
    active_root = skills_manager.get_agent_active_root(config_name, agent_id)
    if active_root.exists():
        for skill_dir in sorted(active_root.iterdir()):
            # Only directories with a SKILL.md manifest count as skills.
            if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                skill_dirs.append(skill_dir)
    # 2. Load from the installed directory.
    installed_root = skills_manager.get_agent_installed_root(config_name, agent_id)
    if installed_root.exists():
        for skill_dir in sorted(installed_root.iterdir()):
            if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                if skill_dir not in skill_dirs:
                    skill_dirs.append(skill_dir)
    # 3. Load agent-local skills from the local directory.
    if include_local:
        local_root = skills_manager.get_agent_local_root(config_name, agent_id)
        if local_root.exists():
            for skill_dir in sorted(local_root.iterdir()):
                if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                    if skill_dir not in skill_dirs:
                        skill_dirs.append(skill_dir)
    # Register skills with the toolkit.
    for skill_dir in skill_dirs:
        toolkit.register_agent_skill(str(skill_dir))
    apply_skill_tool_restrictions(toolkit, skill_dirs)
    # Activate the requested tool groups.
    if active_groups is None:
        # Fall back to the workspace config, then the static agent profile.
        profiles = load_agent_profiles()
        profile = profiles.get(agent_id, {})
        active_groups = agent_config.active_tool_groups or profile.get("active_tool_groups", [])
    # Apply the disabled list; disabled groups always win.
    disabled_groups = set(agent_config.disabled_tool_groups)
    if disabled_groups:
        active_groups = [g for g in active_groups if g not in disabled_groups]
    if active_groups:
        toolkit.update_tool_groups(group_names=active_groups, active=True)
    return toolkit
def get_toolkit_info(toolkit: Any) -> Dict[str, Any]:
    """Summarize a toolkit's tool groups and registered skills.

    Args:
        toolkit: Toolkit instance.

    Returns:
        A dict with keys "tool_groups" (per-group description/active/tools),
        "skills" (name/path/description entries), and "tools_count".
    """
    group_summaries: Dict[str, Any] = {}
    total_tools = 0
    for group_name, group in getattr(toolkit, "tool_groups", {}).items():
        members = getattr(group, "tools", [])
        group_summaries[group_name] = {
            "description": getattr(group, "description", ""),
            "active": getattr(group, "active", False),
            "tools": [member.name for member in members],
        }
        total_tools += len(members)
    skill_summaries = [
        {
            "name": getattr(skill, "name", "unknown"),
            "path": getattr(skill, "path", ""),
            "description": getattr(skill, "description", ""),
        }
        for skill in getattr(toolkit, "agent_skills", [])
    ]
    return {
        "tool_groups": group_summaries,
        "skills": skill_summaries,
        "tools_count": total_tools,
    }
def refresh_toolkit_skills(
    toolkit: Any,
    agent_id: str,
    config_name: str,
) -> None:
    """Reload the toolkit's skills from the agent workspace.

    Clears currently registered skills and re-registers every skill found
    under the agent's active and local skill roots; intended for runtime
    skill changes.

    Args:
        toolkit: Toolkit instance.
        agent_id: Agent identifier.
        config_name: Run configuration name.
    """
    skills_manager = SkillsManager()
    # Drop whatever is currently registered before re-scanning the roots.
    if hasattr(toolkit, "agent_skills"):
        toolkit.agent_skills.clear()
    skill_roots = (
        skills_manager.get_agent_active_root(config_name, agent_id),
        skills_manager.get_agent_local_root(config_name, agent_id),
    )
    for root in skill_roots:
        if not root.exists():
            continue
        for candidate in sorted(root.iterdir()):
            # Only directories with a SKILL.md manifest count as skills.
            if candidate.is_dir() and (candidate / "SKILL.md").exists():
                toolkit.register_agent_skill(str(candidate))
def apply_skill_tool_restrictions(toolkit: Any, skill_dirs: List[Path]) -> None:
    """Apply per-skill allowed_tools / denied_tools restrictions to a toolkit.

    For each skill directory whose metadata declares allowed_tools or
    denied_tools, the corresponding registered skill object on the toolkit
    is annotated with ``_tool_allowed`` / ``_tool_denied`` sets. Downstream
    code (see get_skill_effective_tools) consults these attributes when
    resolving available tools; denied tools take precedence.

    Args:
        toolkit: The agentscope Toolkit instance.
        skill_dirs: List of skill directory paths to inspect.
    """
    restrictions: Dict[str, Dict[str, Set[str]]] = {}
    for skill_dir in skill_dirs:
        metadata = parse_skill_metadata(skill_dir, source="active")
        if metadata.allowed_tools or metadata.denied_tools:
            restrictions[skill_dir.name] = {
                "allowed": set(metadata.allowed_tools),
                "denied": set(metadata.denied_tools),
            }
    if not hasattr(toolkit, "agent_skills"):
        return
    for registered_skill in toolkit.agent_skills:
        entry = restrictions.get(getattr(registered_skill, "name", "") or "")
        if entry is None:
            continue
        setattr(registered_skill, "_tool_allowed", entry["allowed"])
        setattr(registered_skill, "_tool_denied", entry["denied"])
def get_skill_effective_tools(skill: Any) -> Optional[Set[str]]:
    """Return the effective tool-name set for a skill, or None if unrestricted.

    Behavior as implemented:
    - When the skill carries no ``_tool_allowed`` attribute (or it is
      ``None``), the skill is treated as unrestricted and ``None`` is
      returned, meaning "all tools allowed".
      NOTE(review): any ``_tool_denied`` set is ignored in this case.
    - Otherwise, the allowed set minus the denied set is returned; an
      empty allowed set therefore yields an empty result.

    Args:
        skill: A skill object previously registered via register_agent_skill.

    Returns:
        The set of allowed tool names, or None when unrestricted.
    """
    allowed = getattr(skill, "_tool_allowed", None)
    if allowed is None:
        return None
    return allowed - getattr(skill, "_tool_denied", set())
def filter_toolkit_by_skill(
    toolkit: Any,
    skill_name: str,
) -> Set[str]:
    """Look up the restricted tool-name set for one named skill.

    Args:
        toolkit: The agentscope Toolkit instance.
        skill_name: Name of the skill to query.

    Returns:
        The skill's effective allowed-tool set. An empty set is returned
        when the toolkit has no ``agent_skills`` attribute, when no skill
        matches *skill_name*, or when the matched skill is unrestricted.
        NOTE(review): unrestricted skills yield ``set()`` here rather than
        the full registered tool list — callers must not interpret an
        empty result as "no tools".
    """
    if not hasattr(toolkit, "agent_skills"):
        return set()
    for skill in toolkit.agent_skills:
        candidate = getattr(skill, "name", "") or ""
        if candidate != skill_name:
            continue
        effective = get_skill_effective_tools(skill)
        return set() if effective is None else effective
    return set()

327
backend/agents/workspace.py Normal file
View File

@@ -0,0 +1,327 @@
# -*- coding: utf-8 -*-
"""Workspace Manager - Create and manage agent workspaces."""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional
import yaml
logger = logging.getLogger(__name__)
@dataclass
class WorkspaceConfig:
    """Serializable descriptor for one workspace (id, display info, metadata)."""
    workspace_id: str
    name: str = ""
    description: str = ""
    created_at: str = ""
    metadata: Dict[str, Any] = field(default_factory=dict)
    def to_dict(self) -> Dict[str, Any]:
        """Serialize this configuration to a plain dictionary."""
        keys = ("workspace_id", "name", "description", "created_at", "metadata")
        return {key: getattr(self, key) for key in keys}
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkspaceConfig":
        """Build a WorkspaceConfig from a dictionary, tolerating missing keys."""
        string_fields = ("workspace_id", "name", "description", "created_at")
        kwargs = {key: data.get(key, "") for key in string_fields}
        kwargs["metadata"] = data.get("metadata", {})
        return cls(**kwargs)
class WorkspaceRegistry:
    """Registry for persistent workspace definitions (design-time).

    Each workspace is a directory under ``<project_root>/workspaces`` that
    holds a ``workspace.yaml`` descriptor, an ``agents/`` directory of
    per-agent configs, and a ``shared/`` data area.
    """
    def __init__(self, project_root: Optional[Path] = None):
        """Initialize the registry and create the workspaces root if needed.

        Args:
            project_root: Root directory of the project. Defaults to three
                levels above this file.
        """
        self.project_root = project_root or Path(__file__).parent.parent.parent
        self.workspaces_root = self.project_root / "workspaces"
        self.workspaces_root.mkdir(parents=True, exist_ok=True)
    def create_workspace(
        self,
        workspace_id: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> WorkspaceConfig:
        """Create a new workspace with directory structure.

        Args:
            workspace_id: Unique identifier for the workspace
            name: Display name for the workspace (defaults to workspace_id)
            description: Optional description
            metadata: Optional metadata dictionary

        Returns:
            WorkspaceConfig instance describing the new workspace

        Raises:
            ValueError: If workspace already exists
        """
        workspace_dir = self.workspaces_root / workspace_id
        if workspace_dir.exists():
            raise ValueError(f"Workspace '{workspace_id}' already exists")
        # Create directory structure
        workspace_dir.mkdir(parents=True, exist_ok=True)
        # Create subdirectories
        (workspace_dir / "agents").mkdir(exist_ok=True)
        (workspace_dir / "shared" / "market_data").mkdir(parents=True, exist_ok=True)
        (workspace_dir / "shared" / "memories").mkdir(parents=True, exist_ok=True)
        # Create workspace.yaml (datetime imported lazily; only needed here)
        from datetime import datetime
        config = WorkspaceConfig(
            workspace_id=workspace_id,
            name=name or workspace_id,
            description=description or "",
            created_at=datetime.now().isoformat(),
            metadata=metadata or {},
        )
        self._write_workspace_config(workspace_dir, config)
        return config
    def list_workspaces(self) -> List[WorkspaceConfig]:
        """List all workspaces that have a readable workspace.yaml.

        Returns:
            List of WorkspaceConfig instances. Directories with missing or
            unparseable configs are skipped with a warning rather than
            failing the whole listing.
        """
        workspaces = []
        if not self.workspaces_root.exists():
            return workspaces
        for workspace_dir in self.workspaces_root.iterdir():
            if not workspace_dir.is_dir():
                continue
            config_path = workspace_dir / "workspace.yaml"
            if config_path.exists():
                try:
                    with open(config_path, "r", encoding="utf-8") as f:
                        data = yaml.safe_load(f) or {}
                    workspaces.append(WorkspaceConfig.from_dict(data))
                except Exception as e:
                    # Deliberate best-effort: one corrupt config must not
                    # hide all other workspaces.
                    logger.warning(f"Failed to load workspace config {config_path}: {e}")
        return workspaces
    def get_workspace_agents(self, workspace_id: str) -> List[Dict[str, Any]]:
        """Get all agents in a workspace.

        Scans ``<workspace>/agents/*/agent.yaml``; unreadable configs are
        skipped with a warning.

        Args:
            workspace_id: ID of the workspace

        Returns:
            List of dicts with keys ``agent_id``, ``agent_type`` and
            ``config_path``

        Raises:
            ValueError: If workspace doesn't exist
        """
        workspace_dir = self.workspaces_root / workspace_id
        if not workspace_dir.exists():
            raise ValueError(f"Workspace '{workspace_id}' does not exist")
        agents = []
        agents_dir = workspace_dir / "agents"
        if not agents_dir.exists():
            return agents
        for agent_dir in agents_dir.iterdir():
            if not agent_dir.is_dir():
                continue
            config_path = agent_dir / "agent.yaml"
            if config_path.exists():
                try:
                    with open(config_path, "r", encoding="utf-8") as f:
                        config = yaml.safe_load(f) or {}
                    agents.append({
                        "agent_id": agent_dir.name,
                        "agent_type": config.get("agent_type", "unknown"),
                        "config_path": str(config_path),
                    })
                except Exception as e:
                    logger.warning(f"Failed to load agent config {config_path}: {e}")
        return agents
    def get_agent_workspace(self, agent_id: str, workspace_id: str) -> Optional[Path]:
        """Get the directory for one agent inside a workspace.

        Args:
            agent_id: ID of the agent
            workspace_id: ID of the workspace

        Returns:
            Path to the agent directory, or None if it does not exist
        """
        agent_dir = self.workspaces_root / workspace_id / "agents" / agent_id
        if agent_dir.exists():
            return agent_dir
        return None
    def workspace_exists(self, workspace_id: str) -> bool:
        """Check if a workspace exists.

        A workspace counts as existing only when both its directory and its
        ``workspace.yaml`` are present.

        Args:
            workspace_id: ID of the workspace

        Returns:
            True if workspace exists, False otherwise
        """
        workspace_dir = self.workspaces_root / workspace_id
        return workspace_dir.exists() and (workspace_dir / "workspace.yaml").exists()
    def delete_workspace(self, workspace_id: str, force: bool = False) -> bool:
        """Delete a workspace and all its agents.

        Args:
            workspace_id: ID of the workspace to delete
            force: If True, delete even if workspace has agents

        Returns:
            True if deleted, False if workspace didn't exist

        Raises:
            ValueError: If workspace has agents and force is False
        """
        import shutil
        workspace_dir = self.workspaces_root / workspace_id
        if not workspace_dir.exists():
            return False
        # Check for agents before destroying anything; require force when
        # the workspace is non-empty.
        agents_dir = workspace_dir / "agents"
        if agents_dir.exists() and any(agents_dir.iterdir()):
            if not force:
                raise ValueError(
                    f"Workspace '{workspace_id}' contains agents. "
                    "Use force=True to delete anyway."
                )
        shutil.rmtree(workspace_dir)
        return True
    def get_workspace_path(self, workspace_id: str) -> Path:
        """Get the path to a workspace directory.

        Note: the path is returned whether or not the workspace exists.

        Args:
            workspace_id: ID of the workspace

        Returns:
            Path to workspace directory
        """
        return self.workspaces_root / workspace_id
    def get_shared_data_path(self, workspace_id: str) -> Optional[Path]:
        """Get the shared data directory for a workspace.

        Args:
            workspace_id: ID of the workspace

        Returns:
            Path to the ``shared`` directory, or None if the workspace
            directory doesn't exist
        """
        workspace_dir = self.workspaces_root / workspace_id
        if not workspace_dir.exists():
            return None
        return workspace_dir / "shared"
    def update_workspace_config(
        self,
        workspace_id: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> WorkspaceConfig:
        """Update workspace configuration in place.

        Args:
            workspace_id: ID of the workspace
            name: New display name (optional)
            description: New description (optional)
            metadata: Metadata to merge into the existing mapping (optional)

        Returns:
            Updated WorkspaceConfig

        Raises:
            ValueError: If workspace doesn't exist
        """
        workspace_dir = self.workspaces_root / workspace_id
        if not workspace_dir.exists():
            raise ValueError(f"Workspace '{workspace_id}' does not exist")
        config_path = workspace_dir / "workspace.yaml"
        current_config = {}
        if config_path.exists():
            try:
                with open(config_path, "r", encoding="utf-8") as f:
                    current_config = yaml.safe_load(f) or {}
            except Exception as e:
                # An unreadable config is treated as empty; the update then
                # effectively rewrites it from the provided fields.
                logger.warning(f"Failed to load existing config {config_path}: {e}")
        # Update fields; only explicitly provided values are touched.
        if name is not None:
            current_config["name"] = name
        if description is not None:
            current_config["description"] = description
        if metadata is not None:
            # Merge (new keys win) rather than replace the whole mapping.
            current_config["metadata"] = {**current_config.get("metadata", {}), **metadata}
        config = WorkspaceConfig.from_dict(current_config)
        self._write_workspace_config(workspace_dir, config)
        return config
    def _write_workspace_config(self, workspace_dir: Path, config: WorkspaceConfig) -> None:
        """Write workspace configuration to ``workspace.yaml``.

        Args:
            workspace_dir: Workspace directory
            config: Workspace configuration
        """
        config_path = workspace_dir / "workspace.yaml"
        with open(config_path, "w", encoding="utf-8") as f:
            yaml.safe_dump(config.to_dict(), f, allow_unicode=True, sort_keys=False)
# Backward-compatible alias: legacy code (and the package __init__, which
# re-exports WorkspaceManager from this module) imported the registry under
# the name WorkspaceManager.
WorkspaceManager = WorkspaceRegistry

View File

@@ -0,0 +1,483 @@
# -*- coding: utf-8 -*-
"""Initialize run-scoped agent workspace assets."""
from pathlib import Path
from typing import Dict, Iterable, Optional
import yaml
from .skills_manager import SkillsManager
from .team_pipeline_config import ensure_team_pipeline_config
class RunWorkspaceManager:
    """Create and maintain run-level prompt asset files for each agent.

    Run assets live under ``<project_root>/runs/<config_name>``: a run-wide
    ``BOOTSTRAP.md`` plus, per agent, markdown prompt files
    (SOUL/PROFILE/AGENTS/POLICY/MEMORY), an ``agent.yaml`` manifest, and the
    skills scaffold (installed/active/disabled/local). Existing files are
    preserved unless their content exactly matches a known generated legacy
    variant, in which case they are upgraded in place.
    """
    def __init__(self, project_root: Optional[Path] = None):
        # SkillsManager resolves the project root when none is supplied;
        # reuse its answer so both managers agree on the directory layout.
        self.skills_manager = SkillsManager(project_root=project_root)
        self.project_root = self.skills_manager.project_root
    def get_run_dir(self, config_name: str) -> Path:
        """Return the run directory for *config_name* (not created here)."""
        return self.project_root / "runs" / config_name
    def ensure_run_workspace(self, config_name: str) -> Path:
        """Create the run directory and its baseline assets if missing.

        Ensures the skills activation manifest, the team pipeline config,
        and a default ``BOOTSTRAP.md`` whose YAML front matter is
        machine-readable runtime configuration and whose markdown body is
        injected into agent prompts as run context.

        Returns:
            Path to the run directory.
        """
        run_dir = self.get_run_dir(config_name)
        run_dir.mkdir(parents=True, exist_ok=True)
        self.skills_manager.ensure_activation_manifest(config_name)
        ensure_team_pipeline_config(
            project_root=self.project_root,
            config_name=config_name,
            default_analysts=[
                "fundamentals_analyst",
                "technical_analyst",
                "sentiment_analyst",
                "valuation_analyst",
            ],
        )
        # Local renamed from `bootstrap_path` to avoid shadowing the method
        # of the same name on this class.
        bootstrap_file = run_dir / "BOOTSTRAP.md"
        if not bootstrap_file.exists():
            bootstrap_file.write_text(
                "---\n"
                "tickers:\n"
                " - AAPL\n"
                " - MSFT\n"
                " - GOOGL\n"
                " - AMZN\n"
                " - NVDA\n"
                " - META\n"
                " - TSLA\n"
                " - AMD\n"
                " - NFLX\n"
                " - AVGO\n"
                " - PLTR\n"
                " - COIN\n"
                "initial_cash: 100000\n"
                "margin_requirement: 0.0\n"
                "enable_memory: false\n"
                "max_comm_cycles: 2\n"
                "agent_overrides: {}\n"
                "---\n\n"
                "# Bootstrap\n\n"
                "Use this file to describe run-specific setup notes, preferred tickers,\n"
                "risk bounds, or strategy constraints before the first execution.\n\n"
                "The YAML front matter above is machine-readable runtime configuration.\n"
                "The markdown body below is injected into agent prompts as run context.\n",
                encoding="utf-8",
            )
        return run_dir
    def bootstrap_path(self, config_name: str) -> Path:
        """Return the path of the run's BOOTSTRAP.md file."""
        return self.get_run_dir(config_name) / "BOOTSTRAP.md"
    def ensure_agent_assets(
        self,
        config_name: str,
        agent_id: str,
        file_contents: Optional[Dict[str, str]] = None,
        persona: Optional[Dict[str, object]] = None,
    ) -> Path:
        """Ensure one agent's asset directory, prompt files, and manifest.

        Args:
            config_name: Run configuration name.
            agent_id: Agent identifier.
            file_contents: Optional explicit filename -> content mapping;
                when omitted, defaults are built from the agent id and
                persona.
            persona: Optional persona used both for analyst default files
                and for recognizing upgradable legacy file variants.

        Returns:
            Path to the agent asset directory.
        """
        asset_dir = self.skills_manager.get_agent_asset_dir(
            config_name,
            agent_id,
        )
        asset_dir.mkdir(parents=True, exist_ok=True)
        (asset_dir / "skills" / "installed").mkdir(parents=True, exist_ok=True)
        (asset_dir / "skills" / "active").mkdir(parents=True, exist_ok=True)
        (asset_dir / "skills" / "disabled").mkdir(parents=True, exist_ok=True)
        (asset_dir / "skills" / "local").mkdir(parents=True, exist_ok=True)
        # Fix: pass persona through so analyst defaults include persona
        # content, matching what initialize_default_assets produced.
        file_contents = file_contents or self.build_default_agent_files(
            agent_id=agent_id,
            persona=persona,
        )
        for filename, content in file_contents.items():
            legacy_contents = self.build_legacy_agent_file_variants(
                agent_id=agent_id,
                filename=filename,
                persona=persona,
            )
            self._ensure_file(asset_dir / filename, content, legacy_contents=legacy_contents)
        self._ensure_agent_yaml(
            asset_dir / "agent.yaml",
            agent_id=agent_id,
        )
        return asset_dir
    def build_default_agent_files(
        self,
        *,
        agent_id: str,
        persona: Optional[Dict[str, object]] = None,
    ) -> Dict[str, str]:
        """Build default workspace markdown files for one agent.

        Analysts get persona-flavored files; the portfolio and risk
        managers get role-specific files; all other ids get the generic set.
        """
        if agent_id.endswith("_analyst"):
            return self._build_analyst_files(agent_id=agent_id, persona=persona or {})
        if agent_id == "portfolio_manager":
            return self._build_portfolio_manager_files()
        if agent_id == "risk_manager":
            return self._build_risk_manager_files()
        return self._build_generic_files(agent_id=agent_id)
    def build_legacy_agent_file_variants(
        self,
        *,
        agent_id: str,
        filename: str,
        persona: Optional[Dict[str, object]] = None,
    ) -> list[str]:
        """Return known generated legacy variants safe to upgrade in-place.

        Only files whose current content exactly matches one of these
        variants are rewritten by ``_ensure_file``; anything user-edited is
        left alone.
        """
        persona = persona or {}
        variants: list[dict[str, str]] = [
            self._build_legacy_english_files(agent_id=agent_id),
            self._build_previous_chinese_files(agent_id=agent_id, persona=persona),
        ]
        values: list[str] = []
        for item in variants:
            content = item.get(filename)
            if content:
                values.append(content)
        return values
    def load_agent_file(
        self,
        *,
        config_name: str,
        agent_id: str,
        filename: str,
    ) -> str:
        """Load one run-scoped agent workspace file.

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        path = self.skills_manager.get_agent_asset_dir(config_name, agent_id) / filename
        if not path.exists():
            # Bug fix: the message was the constant "File not found: (unknown)"
            # (an f-string with no placeholder); interpolate the actual path.
            raise FileNotFoundError(f"File not found: {path}")
        return path.read_text(encoding="utf-8")
    def update_agent_file(
        self,
        *,
        config_name: str,
        agent_id: str,
        filename: str,
        content: str,
    ) -> None:
        """Write one run-scoped agent workspace file (creating dirs as needed)."""
        asset_dir = self.skills_manager.get_agent_asset_dir(config_name, agent_id)
        asset_dir.mkdir(parents=True, exist_ok=True)
        path = asset_dir / filename
        path.write_text(content, encoding="utf-8")
    def initialize_default_assets(
        self,
        config_name: str,
        agent_ids: Iterable[str],
        analyst_personas: Optional[Dict[str, Dict]] = None,
    ) -> None:
        """Ensure the run workspace plus default assets for every agent id.

        Deduplicated: the per-agent body previously repeated
        ``ensure_agent_assets`` line for line; it now delegates to it.
        """
        self.ensure_run_workspace(config_name)
        analyst_personas = analyst_personas or {}
        for agent_id in agent_ids:
            # Analysts get a persona-flavored default file set; other roles
            # use their role-specific or generic defaults.
            persona = (
                analyst_personas.get(agent_id, {})
                if agent_id.endswith("_analyst")
                else None
            )
            self.ensure_agent_assets(config_name, agent_id, persona=persona)
    @staticmethod
    def _ensure_file(path: Path, content: str, *, legacy_contents: Optional[list[str]] = None) -> None:
        """Write *content* when the file is missing or matches a legacy variant.

        User-edited files are preserved: only missing files, or files whose
        stripped text equals one of *legacy_contents*, are (re)written.
        """
        if not path.exists():
            path.write_text(content, encoding="utf-8")
            return
        existing = path.read_text(encoding="utf-8")
        normalized_existing = existing.strip()
        candidates = {item.strip() for item in (legacy_contents or []) if item and item.strip()}
        if normalized_existing in candidates:
            path.write_text(content, encoding="utf-8")
    @staticmethod
    def _build_generic_files(agent_id: str) -> Dict[str, str]:
        """Generic default prompt files used by any non-specialized agent."""
        return {
            "SOUL.md": (
                "# Soul\n\n"
                f"你是 `{agent_id}`,语气冷静、客观、专业。保持清晰推理,优先基于数据而不是情绪下结论。\n"
            ),
            "PROFILE.md": (
                "# Profile\n\n"
                "记录这个 agent 长期稳定的分析风格、偏好、优势与盲点。\n"
            ),
            "AGENTS.md": (
                "# Agent Guide\n\n"
                "工作要求:\n"
                "- 优先使用已激活的技能和工具\n"
                "- 结论要明确,过程要可追溯\n"
                "- 与其他 agent 协作时保持输入输出简洁\n"
                "- 最终输出必须使用简体中文;如需引用英文术语,仅保留专有名词,解释和结论必须用中文\n"
            ),
            "POLICY.md": (
                "# Policy\n\n"
                "- 给出结论时说明核心驱动因素\n"
                "- 明确风险边界和结论失效条件\n"
                "- 出现反例时需要纳入最终判断\n"
                "- 不要输出英文报告标题、英文摘要或整段英文正文\n"
            ),
            "MEMORY.md": (
                "# Memory\n\n"
                "记录可复用的经验、失误复盘、有效启发式和需要持续跟踪的提醒。\n"
            ),
        }
    @classmethod
    def _build_analyst_files(cls, *, agent_id: str, persona: Dict[str, object]) -> Dict[str, str]:
        """Analyst prompt files flavored with the persona's name/focus/description."""
        role_name = str(persona.get("name") or agent_id)
        focus_items = [
            str(item).strip()
            for item in persona.get("focus", [])
            if str(item).strip()
        ]
        focus_md = "\n".join(f"- {item}" for item in focus_items) or "- 根据当前任务选择最相关的分析维度"
        description = str(persona.get("description") or "").strip()
        # Start from the generic set and override the persona-specific files.
        files = cls._build_generic_files(agent_id)
        files["SOUL.md"] = (
            "# Soul\n\n"
            f"你是一位专业的{role_name}\n\n"
            "保持谦逊和开放,主动寻找与自己观点相悖的证据,并将其纳入最终评估。"
            "你的分析要体现持续演化的投资哲学,而不是一次性的结论。\n"
        )
        files["PROFILE.md"] = (
            "# Profile\n\n"
            f"角色定位:{role_name}\n\n"
            "你的关注重点:\n"
            f"{focus_md}\n\n"
            "角色说明:\n"
            f"{description or '围绕最关键的基本面、技术面、情绪面或估值因素形成高质量判断。'}\n"
        )
        files["AGENTS.md"] = (
            "# Agent Guide\n\n"
            "分析流程:\n"
            "- 优先识别真正驱动价值或价格变化的核心变量\n"
            "- 使用相关工具和技能补足证据链\n"
            "- 给出可验证、可复查、可执行的分析结果\n"
            "- 在团队讨论中清晰表达你的论点和反论点\n\n"
            "输出要求:\n"
            "- 给出明确投资信号:看涨、看跌或中性\n"
            "- 包含置信度0-100\n"
            "- 如果你确定要分享最终分析,请先给出结论,再给出推理依据\n"
            "- 最终输出必须使用简体中文,不要生成英文版 analysis report\n"
        )
        files["POLICY.md"] = (
            "# Policy\n\n"
            "- 深化你的投资逻辑,确保每项建议都有清晰、可追溯、可重复的依据\n"
            "- 明确风险边界:在什么具体情况下当前结论会失效\n"
            "- 做逆向测试:说明市场主流共识与你的不同点\n"
            "- 每次分析后反思这次案例如何验证或挑战你现有的信念\n"
            "- 即使输入新闻或财报原文是英文,最终表达也必须用中文\n"
        )
        return files
    @classmethod
    def _build_portfolio_manager_files(cls) -> Dict[str, str]:
        """Portfolio-manager prompt files (decision framework + constraints)."""
        files = cls._build_generic_files("portfolio_manager")
        files["SOUL.md"] = (
            "# Soul\n\n"
            "你是一位负责做出投资决策的投资组合经理。你需要综合多个分析视角,"
            "做出保守、明确、资本约束下可执行的组合决策。\n"
        )
        files["PROFILE.md"] = (
            "# Profile\n\n"
            "核心职责:\n"
            "- 分析分析师和风险管理经理的输入\n"
            "- 基于信号和市场情境做出投资决策\n"
            "- 使用可用工具记录每个 ticker 的决策\n"
        )
        files["AGENTS.md"] = (
            "# Agent Guide\n\n"
            "决策框架:\n"
            "- 审阅分析以理解市场观点\n"
            "- 在做决策前先考虑风险警告\n"
            "- 评估当前投资组合持仓、现金与保证金占用\n"
            "- 决策必须与整体投资目标和风险约束一致\n\n"
            "决策类型:\n"
            '- `long`:看涨,建议买入\n'
            '- `short`:看跌,建议卖出或做空\n'
            '- `hold`:中性,维持当前持仓\n\n'
            "输出要求:\n"
            "- 使用 `make_decision` 工具记录每个股票的最终决策\n"
            "- 记录完成后给出投资逻辑总结\n"
            "- 最终总结必须使用简体中文\n"
        )
        files["POLICY.md"] = (
            "# Policy\n\n"
            "- 在决定数量时考虑可用现金,不要超出现金允许范围\n"
            "- 考虑做空头寸的保证金要求\n"
            "- 仓位规模相对于组合总资产保持保守\n"
            "- 始终为决策提供清晰理由\n"
            "- 不要输出英文投资报告或英文结论\n"
        )
        return files
    @classmethod
    def _build_risk_manager_files(cls) -> Dict[str, str]:
        """Risk-manager prompt files (quantified, prioritized risk guidance)."""
        files = cls._build_generic_files("risk_manager")
        files["SOUL.md"] = (
            "# Soul\n\n"
            "你是一位专业的风险管理经理,负责监控投资组合风险并提供风险警告。"
            "你的目标不是输出空泛的谨慎,而是给出量化、可执行、可优先级排序的风险意见。\n"
        )
        files["PROFILE.md"] = (
            "# Profile\n\n"
            "核心职责:\n"
            "- 监控投资组合敞口和集中度风险\n"
            "- 评估仓位规模相对于波动性是否合理\n"
            "- 评估保证金使用和杠杆水平\n"
            "- 识别潜在风险因素并提供警告\n"
            "- 基于市场条件建议仓位限制\n"
        )
        files["AGENTS.md"] = (
            "# Agent Guide\n\n"
            "决策流程:\n"
            "- 优先使用可用的风险工具量化集中度、波动率和保证金压力\n"
            "- 结合工具结果与当前市场上下文做判断\n"
            "- 生成可操作的风险警告和仓位限制建议\n"
            "- 为风险评估提供清晰理由\n\n"
            "输出要求:\n"
            "- 风险评估要简洁但全面\n"
            "- 按严重程度优先排序警告\n"
            "- 提供具体、可操作的建议\n"
            "- 尽可能包含量化指标\n"
            "- 最终风险结论必须使用简体中文\n"
        )
        files["POLICY.md"] = (
            "# Policy\n\n"
            "- 先量化,再判断,不要只给抽象风险表述\n"
            "- 高严重度风险必须先说\n"
            "- 最终结论需要明确仓位限制或调整建议\n"
            "- 不要输出英文风险报告或英文摘要\n"
        )
        return files
    @staticmethod
    def _build_legacy_english_files(agent_id: str) -> Dict[str, str]:
        """First-generation English placeholder files (recognized for upgrade)."""
        policy_tail = "Optional run-scoped constraints, limits, or strategy policy.\n\n"
        if agent_id == "portfolio_manager":
            policy_tail += "Respect cash, margin, and portfolio concentration constraints before recording decisions.\n"
        elif agent_id == "risk_manager":
            policy_tail += "Use available risk tools before issuing the final risk memo.\n"
        elif agent_id.endswith("_analyst"):
            policy_tail += "State a clear signal, confidence, and the conditions that would invalidate the thesis.\n"
        return {
            "SOUL.md": "# Soul\n\nDescribe the agent's temperament, reasoning posture, and voice.\n\n",
            "PROFILE.md": "# Profile\n\nTrack this agent's long-lived investment style, preferences, and strengths.\n\n",
            "AGENTS.md": "# Agent Guide\n\nDocument how this agent should work, collaborate, and choose tools or skills.\n\n",
            "POLICY.md": "# Policy\n\n" + policy_tail,
            "MEMORY.md": "# Memory\n\nStore durable lessons, heuristics, and reminders for this agent.\n\n",
        }
    @classmethod
    def _build_previous_chinese_files(cls, *, agent_id: str, persona: Dict[str, object]) -> Dict[str, str]:
        """Second-generation Chinese files (recognized for upgrade to current set)."""
        if agent_id.endswith("_analyst"):
            role_name = str(persona.get("name") or agent_id)
            focus_items = [
                str(item).strip()
                for item in persona.get("focus", [])
                if str(item).strip()
            ]
            focus_md = "\n".join(f"- {item}" for item in focus_items) or "- 根据当前任务选择最相关的分析维度"
            description = str(persona.get("description") or "").strip()
            return {
                "SOUL.md": (
                    "# Soul\n\n"
                    f"你是一位专业的{role_name}\n\n"
                    "保持谦逊和开放,主动寻找与自己观点相悖的证据,并将其纳入最终评估。"
                    "你的分析要体现持续演化的投资哲学,而不是一次性的结论。\n"
                ),
                "PROFILE.md": (
                    "# Profile\n\n"
                    f"角色定位:{role_name}\n\n"
                    "你的关注重点:\n"
                    f"{focus_md}\n\n"
                    "角色说明:\n"
                    f"{description or '围绕最关键的基本面、技术面、情绪面或估值因素形成高质量判断。'}\n"
                ),
                "AGENTS.md": (
                    "# Agent Guide\n\n"
                    "分析流程:\n"
                    "- 优先识别真正驱动价值或价格变化的核心变量\n"
                    "- 使用相关工具和技能补足证据链\n"
                    "- 给出可验证、可复查、可执行的分析结果\n"
                    "- 在团队讨论中清晰表达你的论点和反论点\n\n"
                    "输出要求:\n"
                    "- 给出明确投资信号:看涨、看跌或中性\n"
                    "- 包含置信度0-100\n"
                    "- 如果你确定要分享最终分析,请先给出结论,再给出推理依据\n"
                ),
                "POLICY.md": (
                    "# Policy\n\n"
                    "- 深化你的投资逻辑,确保每项建议都有清晰、可追溯、可重复的依据\n"
                    "- 明确风险边界:在什么具体情况下当前结论会失效\n"
                    "- 做逆向测试:说明市场主流共识与你的不同点\n"
                    "- 每次分析后反思这次案例如何验证或挑战你现有的信念\n"
                ),
                "MEMORY.md": "# Memory\n\n记录可复用的经验、失误复盘、有效启发式和需要持续跟踪的提醒。\n",
            }
        if agent_id == "portfolio_manager":
            return {
                "SOUL.md": "# Soul\n\n你是一位负责做出投资决策的投资组合经理。你需要综合多个分析视角,做出保守、明确、资本约束下可执行的组合决策。\n",
                "PROFILE.md": "# Profile\n\n核心职责:\n- 分析分析师和风险管理经理的输入\n- 基于信号和市场情境做出投资决策\n- 使用可用工具记录每个 ticker 的决策\n",
                "AGENTS.md": "# Agent Guide\n\n决策框架:\n- 审阅分析以理解市场观点\n- 在做决策前先考虑风险警告\n- 评估当前投资组合持仓、现金与保证金占用\n- 决策必须与整体投资目标和风险约束一致\n\n决策类型:\n- `long`:看涨,建议买入\n- `short`:看跌,建议卖出或做空\n- `hold`:中性,维持当前持仓\n\n输出要求:\n- 使用 `make_decision` 工具记录每个股票的最终决策\n- 记录完成后给出投资逻辑总结\n",
                "POLICY.md": "# Policy\n\n- 在决定数量时考虑可用现金,不要超出现金允许范围\n- 考虑做空头寸的保证金要求\n- 仓位规模相对于组合总资产保持保守\n- 始终为决策提供清晰理由\n",
                "MEMORY.md": "# Memory\n\n记录可复用的经验、失误复盘、有效启发式和需要持续跟踪的提醒。\n",
            }
        if agent_id == "risk_manager":
            return {
                "SOUL.md": "# Soul\n\n你是一位专业的风险管理经理,负责监控投资组合风险并提供风险警告。你的目标不是输出空泛的谨慎,而是给出量化、可执行、可优先级排序的风险意见。\n",
                "PROFILE.md": "# Profile\n\n核心职责:\n- 监控投资组合敞口和集中度风险\n- 评估仓位规模相对于波动性是否合理\n- 评估保证金使用和杠杆水平\n- 识别潜在风险因素并提供警告\n- 基于市场条件建议仓位限制\n",
                "AGENTS.md": "# Agent Guide\n\n决策流程:\n- 优先使用可用的风险工具量化集中度、波动率和保证金压力\n- 结合工具结果与当前市场上下文做判断\n- 生成可操作的风险警告和仓位限制建议\n- 为风险评估提供清晰理由\n\n输出要求:\n- 风险评估要简洁但全面\n- 按严重程度优先排序警告\n- 提供具体、可操作的建议\n- 尽可能包含量化指标\n",
                "POLICY.md": "# Policy\n\n- 先量化,再判断,不要只给抽象风险表述\n- 高严重度风险必须先说\n- 最终结论需要明确仓位限制或调整建议\n",
                "MEMORY.md": "# Memory\n\n记录可复用的经验、失误复盘、有效启发式和需要持续跟踪的提醒。\n",
            }
        # Unknown roles only ever had the English placeholder generation.
        return cls._build_legacy_english_files(agent_id)
    @staticmethod
    def _ensure_agent_yaml(path: Path, agent_id: str) -> None:
        """Write a default agent.yaml manifest if one does not already exist."""
        if path.exists():
            return
        payload = {
            "agent_id": agent_id,
            "prompt_files": [
                "SOUL.md",
                "PROFILE.md",
                "AGENTS.md",
                "POLICY.md",
                "MEMORY.md",
            ],
            "enabled_skills": [],
            "disabled_skills": [],
            "active_tool_groups": [],
            "disabled_tool_groups": [],
        }
        path.write_text(
            yaml.safe_dump(payload, allow_unicode=True, sort_keys=False),
            encoding="utf-8",
        )
# Backward-compatible alias: code importing WorkspaceManager directly from
# this module should continue to work. NOTE(review): the package __init__
# re-exports WorkspaceManager from .workspace (WorkspaceRegistry), so this
# module-local alias only affects `from .workspace_manager import ...` users.
WorkspaceManager = RunWorkspaceManager