feat: Add evaluation hooks, skill adaptation and team pipeline config
- Add EvaluationHook for post-execution agent evaluation
- Add SkillAdaptationHook for dynamic skill adaptation
- Add team/ directory with team coordination logic
- Add TEAM_PIPELINE.yaml for smoke_fullstack pipeline config
- Update RuntimeView, TraderView and RuntimeSettingsPanel UI
- Add runtimeApi and websocket services
- Add runtime_state.json to smoke_fullstack state

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
@@ -13,6 +13,26 @@ from .command_handler import (
    create_command_dispatcher,
)

# Evaluation hooks (imported from evaluation_hook.py)
from .evaluation_hook import (
    EvaluationHook,
    EvaluationCollector,
    MetricType,
    EvaluationMetric,
    EvaluationResult,
    parse_evaluation_hooks,
)

# Skill adaptation hooks (imported from skill_adaptation_hook.py)
from .skill_adaptation_hook import (
    AdaptationAction,
    AdaptationThreshold,
    AdaptationEvent,
    SkillAdaptationHook,
    AdaptationManager,
    get_adaptation_manager,
)

__all__ = [
    # Command handling
    "AgentCommandDispatcher",
@@ -20,4 +40,18 @@ __all__ = [
    "CommandHandler",
    "CommandResult",
    "create_command_dispatcher",
    # Evaluation hooks
    "EvaluationHook",
    "EvaluationCollector",
    "MetricType",
    "EvaluationMetric",
    "EvaluationResult",
    "parse_evaluation_hooks",
    # Skill adaptation hooks
    "AdaptationAction",
    "AdaptationThreshold",
    "AdaptationEvent",
    "SkillAdaptationHook",
    "AdaptationManager",
    "get_adaptation_manager",
]

@@ -27,6 +27,7 @@ from .hooks import (
    HookManager,
    BootstrapHook,
    MemoryCompactionHook,
    WorkspaceWatchHook,
    HOOK_PRE_REASONING,
)
from ..prompts.builder import (
@@ -36,6 +37,16 @@ from ..prompts.builder import (
from ..agent_workspace import load_agent_workspace_config
from ..skills_manager import SkillsManager

# Team infrastructure imports (graceful import - may not exist yet)
try:
    from backend.agents.team.messenger import AgentMessenger
    from backend.agents.team.task_delegator import TaskDelegator
    TEAM_INFRA_AVAILABLE = True
except ImportError:
    TEAM_INFRA_AVAILABLE = False
    AgentMessenger = None
    TaskDelegator = None

if TYPE_CHECKING:
    from agentscope.formatter import FormatterBase
    from agentscope.model import ModelWrapperBase
@@ -152,6 +163,12 @@ class EvoAgent(ToolGuardMixin, ReActAgent):
            memory_compact_threshold=memory_compact_threshold,
        )

        # Initialize team infrastructure if available
        self._messenger: Optional["AgentMessenger"] = None
        self._task_delegator: Optional["TaskDelegator"] = None
        if TEAM_INFRA_AVAILABLE:
            self._init_team_infrastructure()

        logger.info(
            "EvoAgent initialized: %s (workspace: %s)",
            agent_id,
@@ -268,6 +285,17 @@ class EvoAgent(ToolGuardMixin, ReActAgent):
        )
        logger.debug("Registered memory compaction hook")

        # Workspace watch hook - auto-reload markdown files on change
        workspace_watch_hook = WorkspaceWatchHook(
            workspace_dir=self.workspace_dir,
        )
        self._hook_manager.register(
            hook_type=HOOK_PRE_REASONING,
            hook_name="workspace_watch",
            hook=workspace_watch_hook,
        )
        logger.debug("Registered workspace watch hook")

    async def _reasoning(self, **kwargs) -> Msg:
        """Override reasoning to execute pre-reasoning hooks.

@@ -405,7 +433,78 @@ class EvoAgent(ToolGuardMixin, ReActAgent):
                )
            ]),
            "registered_hooks": self._hook_manager.list_hooks(),
            "team_infra_available": TEAM_INFRA_AVAILABLE,
        }

    def _init_team_infrastructure(self) -> None:
        """Initialize team infrastructure components (messenger and task delegator).

        This method initializes the AgentMessenger for inter-agent communication
        and the TaskDelegator for subagent delegation.
        """
        if not TEAM_INFRA_AVAILABLE:
            return

        try:
            self._messenger = AgentMessenger(agent_id=self.agent_id)
            self._task_delegator = TaskDelegator(agent=self)
            logger.debug(
                "Team infrastructure initialized for agent: %s",
                self.agent_id,
            )
        except Exception as e:
            logger.warning(
                "Failed to initialize team infrastructure for %s: %s",
                self.agent_id,
                e,
            )
            self._messenger = None
            self._task_delegator = None

    @property
    def messenger(self) -> Optional["AgentMessenger"]:
        """Get the agent's messenger for inter-agent communication.

        Returns:
            AgentMessenger instance if available, None otherwise
        """
        return self._messenger

    def delegate_task(
        self,
        task_type: str,
        task_data: Dict[str, Any],
        target_agent: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Delegate a task to a subagent using the TaskDelegator.

        Args:
            task_type: Type of task to delegate
            task_data: Data/payload for the task
            target_agent: Optional specific agent ID to delegate to

        Returns:
            Dict containing the delegation result
        """
        if not TEAM_INFRA_AVAILABLE or self._task_delegator is None:
            return {
                "success": False,
                "error": "Team infrastructure not available",
            }

        try:
            return self._task_delegator.delegate_task(
                task_type=task_type,
                task_data=task_data,
                target_agent=target_agent,
            )
        except Exception as e:
            logger.error(
                "Task delegation failed for %s: %s",
                self.agent_id,
                e,
            )
            return {"success": False, "error": str(e)}


__all__ = ["EvoAgent"]

@@ -284,19 +284,120 @@ class BootstrapHook(Hook):
        return None


class WorkspaceWatchHook(Hook):
    """Hook for auto-reloading workspace markdown files on change.

    Monitors SOUL.md, AGENTS.md, PROFILE.md, etc. and triggers
    a prompt rebuild when any of them change. Based on CoPaw's
    AgentConfigWatcher approach but for markdown files.
    """

    # Files to monitor (same as PromptBuilder.DEFAULT_FILES)
    WATCHED_FILES = frozenset([
        "SOUL.md", "AGENTS.md", "PROFILE.md", "ROLE.md",
        "POLICY.md", "MEMORY.md", "HEARTBEAT.md", "STYLE.md",
        "BOOTSTRAP.md",
    ])

    def __init__(
        self,
        workspace_dir: Path,
        poll_interval: float = 2.0,
    ):
        """Initialize workspace watch hook.

        Args:
            workspace_dir: Workspace directory to monitor
            poll_interval: How often to check for changes (seconds)
        """
        self.workspace_dir = Path(workspace_dir)
        self.poll_interval = poll_interval
        self._last_mtimes: dict[str, float] = {}
        self._initialized = False

    def _scan_mtimes(self) -> dict[str, float]:
        """Scan watched files and return their current mtimes."""
        mtimes = {}
        for name in self.WATCHED_FILES:
            path = self.workspace_dir / name
            if path.exists():
                mtimes[name] = path.stat().st_mtime
        return mtimes

    def _has_changes(self) -> bool:
        """Check if any watched file has changed since last check."""
        current = self._scan_mtimes()

        if not self._initialized:
            self._last_mtimes = current
            self._initialized = True
            return False

        # Check for new, modified, or deleted files
        if set(current.keys()) != set(self._last_mtimes.keys()):
            self._last_mtimes = current
            return True

        for name, mtime in current.items():
            if mtime != self._last_mtimes.get(name):
                self._last_mtimes = current
                return True

        return False

    async def __call__(
        self,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Check for file changes and rebuild prompt if needed.

        Args:
            agent: The agent instance
            kwargs: Input arguments (unused)

        Returns:
            None
        """
        try:
            if self._has_changes():
                logger.info(
                    "Workspace files changed, triggering prompt rebuild for: %s",
                    getattr(agent, "agent_id", "unknown"),
                )
                if hasattr(agent, "rebuild_sys_prompt"):
                    agent.rebuild_sys_prompt()
                else:
                    logger.warning(
                        "Agent %s has no rebuild_sys_prompt method",
                        getattr(agent, "agent_id", "unknown"),
                    )
        except Exception as e:
            logger.error("Workspace watch hook failed: %s", e, exc_info=True)

        return None
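
As a wiring reference, a sketch of how this hook is registered, mirroring the registration shown in the EvoAgent hunk above (the workspace path and the `hook_manager` instance are illustrative):

    watch = WorkspaceWatchHook(workspace_dir=Path("runs/smoke_fullstack"))  # path is illustrative
    hook_manager.register(  # assumes a HookManager instance as in EvoAgent
        hook_type=HOOK_PRE_REASONING,
        hook_name="workspace_watch",
        hook=watch,
    )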


class MemoryCompactionHook(Hook):
    """Hook for automatic memory compaction when context is full.

    This hook monitors the token count of messages and triggers compaction
    when it exceeds the threshold. It preserves the system prompt and recent
    messages while summarizing older conversation history.

    Based on CoPaw's memory compaction design with additional improvements:
    - memory_compact_ratio: Ratio to compact when threshold reached
    - memory_reserve_ratio: Always keep a reserve of tokens for recent messages
    - enable_tool_result_compact: Compact tool results separately
    - tool_result_compact_keep_n: Number of tool results to keep
    """

    def __init__(
        self,
        memory_manager: Any,
        memory_compact_threshold: Optional[int] = None,
        memory_compact_reserve: Optional[int] = None,
        memory_compact_ratio: float = 0.75,
        memory_reserve_ratio: float = 0.1,
        enable_tool_result_compact: bool = False,
        tool_result_compact_keep_n: int = 5,
    ):
@@ -305,13 +406,15 @@ class MemoryCompactionHook(Hook):
        Args:
            memory_manager: Memory manager instance for compaction
            memory_compact_threshold: Token threshold for compaction
            memory_compact_reserve: Reserve tokens for recent messages
            memory_compact_ratio: Target ratio to compact to (e.g., 0.75 = compact to 75%)
            memory_reserve_ratio: Reserve ratio to always keep free (e.g., 0.1 = 10%)
            enable_tool_result_compact: Enable tool result compaction
            tool_result_compact_keep_n: Number of tool results to keep
        """
        self.memory_manager = memory_manager
        self.memory_compact_threshold = memory_compact_threshold
        self.memory_compact_reserve = memory_compact_reserve
        self.memory_compact_ratio = memory_compact_ratio
        self.memory_reserve_ratio = memory_reserve_ratio
        self.enable_tool_result_compact = enable_tool_result_compact
        self.tool_result_compact_keep_n = tool_result_compact_keep_n
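
A worked example of how the two ratios interact, assuming roughly 100k tokens in memory; note that with these defaults the reserve clamp dominates the compact ratio:

    total_tokens = 100_000
    reserve_tokens = int(total_tokens * 0.1)   # 10_000 kept free for recent messages
    target_tokens = int(total_tokens * 0.75)   # 75_000 from memory_compact_ratio
    target_tokens = max(target_tokens, total_tokens - reserve_tokens)  # 90_000
    # Messages that do not fit inside the 90_000-token budget become
    # candidates for compaction in the loop below.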

@@ -382,32 +485,61 @@ class MemoryCompactionHook(Hook):
    ) -> None:
        """Compact memory by summarizing older messages.

        Uses CoPaw-style memory management:
        - memory_compact_ratio: Target ratio to compact to (e.g., 0.75 means compact to 75%)
        - memory_reserve_ratio: Always keep this ratio free (e.g., 0.1 means keep 10% for recent)

        Args:
            agent: The agent instance
            messages: Current messages in memory
        """
        if self.memory_compact_reserve is None:
        if self.memory_compact_threshold is None:
            return

        # Keep recent messages
        keep_count = min(
            len(messages) // 4,
            10,  # Max 10 recent messages
        )
        keep_count = max(keep_count, 2)  # At least 2
        # Estimate total tokens
        total_tokens = self._estimate_tokens(messages)

        messages_to_compact = messages[:-keep_count] if keep_count < len(messages) else []
        # Calculate reserve based on ratio (CoPaw-style)
        reserve_tokens = int(total_tokens * self.memory_reserve_ratio)

        # Calculate target tokens after compaction
        target_tokens = int(total_tokens * self.memory_compact_ratio)
        target_tokens = max(target_tokens, total_tokens - reserve_tokens)

        # Find messages to compact (older ones)
        # Keep recent messages that fit within target
        messages_to_compact = []
        kept_tokens = 0

        # Start from oldest, stop when we've kept enough
        for msg in messages:
            msg_tokens = self._estimate_tokens([msg])
            if kept_tokens + msg_tokens > target_tokens:
                messages_to_compact.append(msg)
            else:
                kept_tokens += msg_tokens

        if not messages_to_compact:
            return

        logger.info(
            "Compacting %d messages (%d tokens) to target %d tokens",
            len(messages_to_compact),
            self._estimate_tokens(messages_to_compact),
            target_tokens,
        )

        # Use memory manager to compact if available
        if hasattr(self.memory_manager, "compact_memory"):
            try:
                summary = await self.memory_manager.compact_memory(
                    messages=messages_to_compact,
                )
                logger.info("Memory compacted: %d messages summarized", len(messages_to_compact))
                logger.info(
                    "Memory compacted: %d messages summarized, summary: %s",
                    len(messages_to_compact),
                    summary[:200] if summary else "N/A",
                )

                # Mark messages as compressed if supported
                if hasattr(agent.memory, "update_messages_mark"):
@@ -420,6 +552,142 @@ class MemoryCompactionHook(Hook):
            except Exception as e:
                logger.error("Memory manager compaction failed: %s", e)

        # Tool result compaction (CoPaw-style)
        if self.enable_tool_result_compact:
            await self._compact_tool_results(agent, messages)

    async def _compact_tool_results(
        self,
        agent: "ReActAgent",
        messages: List[Any],
    ) -> None:
        """Compact tool results by keeping only recent ones.

        Based on CoPaw's tool_result_compact_keep_n pattern.
        Tool results can be very verbose, so we keep only the N most recent ones.

        Args:
            agent: The agent instance
            messages: Current messages in memory
        """
        if not hasattr(agent.memory, "content"):
            return

        # Find tool result messages (usually have "tool" role or tool-related content)
        tool_results = []
        for msg, _ in agent.memory.content:
            if hasattr(msg, "role") and msg.role == "tool":
                tool_results.append(msg)

        if len(tool_results) <= self.tool_result_compact_keep_n:
            return

        # Keep only the most recent N tool results
        excess_results = tool_results[:-self.tool_result_compact_keep_n]

        logger.info(
            "Tool result compaction: %d tool results found, keeping %d, compacting %d",
            len(tool_results),
            self.tool_result_compact_keep_n,
            len(excess_results),
        )

        # Mark excess tool results as compressed if supported
        if hasattr(agent.memory, "update_messages_mark"):
            from agentscope.agent._react_agent import _MemoryMark
            await agent.memory.update_messages_mark(
                new_mark=_MemoryMark.COMPRESSED,
                msg_ids=[msg.id for msg in excess_results],
            )


class HeartbeatHook(Hook):
    """Pre-reasoning hook that injects HEARTBEAT.md content.

    Reads the agent's HEARTBEAT.md file and prepends it to the
    reasoning input, causing the agent to perform self-checks.

    This enables "主动检查" (proactive monitoring) - periodic
    market condition and position checks during trading hours.
    """

    HEARTBEAT_FILE = "HEARTBEAT.md"

    def __init__(self, workspace_dir: Path):
        """Initialize heartbeat hook.

        Args:
            workspace_dir: Workspace directory containing HEARTBEAT.md
        """
        self.workspace_dir = Path(workspace_dir)
        self._completed_flag = self.workspace_dir / ".heartbeat_completed"

    def _read_heartbeat_content(self) -> Optional[str]:
        """Read HEARTBEAT.md if it exists and is non-empty.

        Returns:
            The HEARTBEAT.md content stripped of whitespace, or None
            if the file is absent or empty.
        """
        hb_path = self.workspace_dir / self.HEARTBEAT_FILE
        if not hb_path.exists():
            return None
        content = hb_path.read_text(encoding="utf-8").strip()
        return content if content else None

    async def __call__(
        self,
        agent: "ReActAgent",
        kwargs: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Prepend the heartbeat task to the first user message.

        Args:
            agent: The agent instance
            kwargs: Input arguments to the _reasoning method

        Returns:
            None. The heartbeat content is injected into the first
            user message in memory in place.
        """
        try:
            content = self._read_heartbeat_content()
            if not content:
                return None

            logger.debug(
                "Heartbeat: found HEARTBEAT.md for agent %s",
                getattr(agent, "agent_id", "unknown"),
            )

            # Build heartbeat task instruction (Chinese)
            hb_task = (
                "# 定期主动检查\n\n"
                f"{content}\n\n"
                "请执行上述检查并报告结果。"
            )

            # Inject into the first user message in memory
            if hasattr(agent, "memory") and agent.memory.content:
                system_count = sum(
                    1 for msg, _ in agent.memory.content if msg.role == "system"
                )
                for msg, _ in agent.memory.content[system_count:]:
                    if msg.role == "user":
                        original_content = msg.content
                        msg.content = hb_task + "\n\n" + original_content
                        break

                logger.debug(
                    "Heartbeat task prepended for agent %s",
                    getattr(agent, "agent_id", "unknown"),
                )

        except Exception as e:
            logger.error("Heartbeat hook failed: %s", e, exc_info=True)

        return None
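
To illustrate the injected text, a sketch assuming a HEARTBEAT.md containing a single check item (the body below is hypothetical; the wrapper strings come from the hook above):

    content = "检查当前持仓风险"  # hypothetical HEARTBEAT.md body
    hb_task = (
        "# 定期主动检查\n\n"
        f"{content}\n\n"
        "请执行上述检查并报告结果。"
    )
    # hb_task is prepended to the first user message after the system messages.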


__all__ = [
    "Hook",
@@ -428,5 +696,7 @@ __all__ = [
    "HOOK_PRE_REASONING",
    "HOOK_POST_ACTING",
    "BootstrapHook",
    "HeartbeatHook",
    "MemoryCompactionHook",
    "WorkspaceWatchHook",
]
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
"""Agent Factory - Dynamic creation and management of EvoAgents."""

import logging
import shutil
from dataclasses import dataclass
from pathlib import Path
@@ -8,6 +9,8 @@ from typing import Any, Dict, List, Optional

import yaml

logger = logging.getLogger(__name__)


@dataclass
class ModelConfig:
@@ -342,9 +345,8 @@ class AgentFactory:
                    "agent_type": config.get("agent_type", "unknown"),
                    "config_path": str(config_path),
                })
            except Exception:
                # Skip invalid agent configs
                pass
            except Exception as e:
                logger.warning(f"Failed to load agent config {config_path}: {e}")

        return agents
@@ -4,7 +4,8 @@ Portfolio Manager Agent - Based on AgentScope ReActAgent
Responsible for decision-making (NOT trade execution)
"""

from typing import Any, Dict, Optional
from pathlib import Path
from typing import Any, Dict, Optional, Callable

from agentscope.agent import ReActAgent
from agentscope.memory import InMemoryMemory, LongTermMemoryBase
@@ -13,6 +14,8 @@ from agentscope.tool import Toolkit, ToolResponse

from ..utils.progress import progress
from .prompt_factory import build_agent_system_prompt, clear_prompt_factory_cache
from .team_pipeline_config import update_active_analysts
from ..config.constants import ANALYST_TYPES


class PMAgent(ReActAgent):
@@ -61,6 +64,8 @@ class PMAgent(ReActAgent):
            "_toolkit_factory_kwargs",
            toolkit_factory_kwargs,
        )
        object.__setattr__(self, "_create_team_agent_cb", None)
        object.__setattr__(self, "_remove_team_agent_cb", None)

        # Create toolkit after local state is ready so bound tool methods can be registered.
        if toolkit is None:
@@ -152,6 +157,107 @@ class PMAgent(ReActAgent):
            ],
        )

    def _add_team_analyst(self, agent_id: str) -> ToolResponse:
        """Add one analyst to the active discussion team."""
        config_name = self.config.get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]
        active = update_active_analysts(
            project_root=project_root,
            config_name=config_name,
            available_analysts=list(ANALYST_TYPES.keys()),
            add=[agent_id],
        )
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=(
                        f"Active analyst team updated. Added: {agent_id}. "
                        f"Current active analysts: {', '.join(active)}"
                    ),
                ),
            ],
        )

    def _remove_team_analyst(self, agent_id: str) -> ToolResponse:
        """Remove one analyst from the active discussion team."""
        callback_msg = ""
        callback = self._remove_team_agent_cb
        if callback is not None:
            callback_msg = callback(agent_id=agent_id)

        config_name = self.config.get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]
        active = update_active_analysts(
            project_root=project_root,
            config_name=config_name,
            available_analysts=list(ANALYST_TYPES.keys()),
            remove=[agent_id],
        )
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=(
                        f"Active analyst team updated. Removed: {agent_id}. "
                        f"Current active analysts: {', '.join(active)}"
                        + (f" | {callback_msg}" if callback_msg else "")
                    ),
                ),
            ],
        )

    def _set_active_analysts(self, agent_ids: str) -> ToolResponse:
        """Set active analysts from comma-separated agent ids."""
        requested = [
            item.strip() for item in str(agent_ids or "").split(",") if item.strip()
        ]
        config_name = self.config.get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]
        active = update_active_analysts(
            project_root=project_root,
            config_name=config_name,
            available_analysts=list(ANALYST_TYPES.keys()),
            set_to=requested,
        )
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=f"Active analyst team set to: {', '.join(active)}",
                ),
            ],
        )

    def _create_team_analyst(self, agent_id: str, analyst_type: str) -> ToolResponse:
        """Create a runtime analyst instance and activate it."""
        callback = self._create_team_agent_cb
        if callback is None:
            return ToolResponse(
                content=[
                    TextBlock(
                        type="text",
                        text="Runtime agent creation is not available in current pipeline.",
                    ),
                ],
            )
        result = callback(agent_id=agent_id, analyst_type=analyst_type)
        return ToolResponse(
            content=[
                TextBlock(type="text", text=result),
            ],
        )

    def set_team_controller(
        self,
        *,
        create_agent_callback: Optional[Callable[..., str]] = None,
        remove_agent_callback: Optional[Callable[..., str]] = None,
    ) -> None:
        """Inject runtime team lifecycle callbacks from the pipeline."""
        object.__setattr__(self, "_create_team_agent_cb", create_agent_callback)
        object.__setattr__(self, "_remove_team_agent_cb", remove_agent_callback)

    async def reply(self, x: Msg = None) -> Msg:
        """
        Make investment decisions

@@ -50,7 +50,13 @@ def build_agent_system_prompt(
    toolkit: Any,
    analyst_type: Optional[str] = None,
) -> str:
    """Build the final system prompt for an agent."""
    """Build the final system prompt for an agent.

    Always reads fresh from disk — no caching.
    """
    # Clear any cached templates before building (CoPaw-style, no caching)
    _prompt_loader.clear_cache()

    sections: list[str] = []
    canonical_agent_id = (
        "portfolio_manager"

@@ -27,10 +27,6 @@ class PromptLoader:
        else:
            self.prompts_dir = Path(prompts_dir)

        # Cache loaded prompts
        self._prompt_cache: Dict[str, str] = {}
        self._yaml_cache: Dict[str, Dict] = {}

    def load_prompt(
        self,
        agent_type: str,
@@ -38,37 +34,20 @@ class PromptLoader:
        variables: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Load and render Prompt
        Load and render Prompt.

        Args:
            agent_type: Agent type (analyst, portfolio_manager, risk_manager)
            prompt_name: Prompt file name (without extension)
            variables: Variable dictionary for rendering Prompt

        Returns:
            Rendered prompt string

        Examples:
            loader = PromptLoader()
            prompt = loader.load_prompt("analyst", "tool_selection",
                {"analyst_persona": "Technical Analyst"})
        No caching — always reads fresh from disk (CoPaw-style).
        """
        cache_key = f"{agent_type}/{prompt_name}"
        prompt_path = self.prompts_dir / agent_type / f"{prompt_name}.md"

        # Try to load from cache
        if cache_key not in self._prompt_cache:
            prompt_path = self.prompts_dir / agent_type / f"{prompt_name}.md"
            if not prompt_path.exists():
                raise FileNotFoundError(
                    f"Prompt file not found: {prompt_path}\n"
                    f"Please create the prompt file or check the path.",
                )

        if not prompt_path.exists():
            raise FileNotFoundError(
                f"Prompt file not found: {prompt_path}\n"
                f"Please create the prompt file or check the path.",
            )

            with open(prompt_path, "r", encoding="utf-8") as f:
                self._prompt_cache[cache_key] = f.read()

        prompt_template = self._prompt_cache[cache_key]
        with open(prompt_path, "r", encoding="utf-8") as f:
            prompt_template = f.read()

        # If variables provided, use simple string replacement
        if variables:
@@ -76,8 +55,6 @@ class PromptLoader:
        else:
            rendered = prompt_template

        # Smart escaping: escape braces in JSON code blocks
        # rendered = self._escape_json_braces(rendered)
        return rendered

    def _render_template(
@@ -140,45 +117,26 @@ class PromptLoader:
        config_name: str,
    ) -> Dict[str, Any]:
        """
        Load YAML configuration file
        Load YAML configuration file.

        Args:
            agent_type: Agent type
            config_name: Configuration file name (without extension)

        Returns:
            Configuration dictionary

        Examples:
            >>> loader = PromptLoader()
            >>> config = loader.load_yaml_config("analyst", "personas")
        No caching — always reads fresh from disk (CoPaw-style).
        """
        cache_key = f"{agent_type}/{config_name}"
        yaml_path = self.prompts_dir / agent_type / f"{config_name}.yaml"

        if cache_key not in self._yaml_cache:
            yaml_path = self.prompts_dir / agent_type / f"{config_name}.yaml"
            if not yaml_path.exists():
                raise FileNotFoundError(f"YAML config not found: {yaml_path}")

        if not yaml_path.exists():
            raise FileNotFoundError(f"YAML config not found: {yaml_path}")

            with open(yaml_path, "r", encoding="utf-8") as f:
                self._yaml_cache[cache_key] = yaml.safe_load(f)

        return self._yaml_cache[cache_key]
        with open(yaml_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f) or {}

    def clear_cache(self):
        """Clear cache (for hot reload)"""
        self._prompt_cache.clear()
        self._yaml_cache.clear()
        """No-op — caching removed (CoPaw-style, always fresh reads)."""
        pass

    def reload_prompt(self, agent_type: str, prompt_name: str):
        """Reload specified prompt (force cache refresh)"""
        cache_key = f"{agent_type}/{prompt_name}"
        if cache_key in self._prompt_cache:
            del self._prompt_cache[cache_key]
        """No-op — caching removed."""
        pass

    def reload_config(self, agent_type: str, config_name: str):
        """Reload specified configuration (force cache refresh)"""
        cache_key = f"{agent_type}/{config_name}"
        if cache_key in self._yaml_cache:
            del self._yaml_cache[cache_key]
        """No-op — caching removed."""
        pass

@@ -19,6 +19,8 @@ class SkillMetadata:
    description: str
    version: str = ""
    tools: List[str] = field(default_factory=list)
    allowed_tools: List[str] = field(default_factory=list)
    denied_tools: List[str] = field(default_factory=list)


def parse_skill_metadata(skill_dir: Path, source: str) -> SkillMetadata:
@@ -60,6 +62,8 @@ def parse_skill_metadata(skill_dir: Path, source: str) -> SkillMetadata:
        description=description,
        version=str(frontmatter.get("version") or "").strip(),
        tools=_string_list(frontmatter.get("tools")),
        allowed_tools=_string_list(frontmatter.get("allowed_tools")),
        denied_tools=_string_list(frontmatter.get("denied_tools")),
    )

@@ -3,14 +3,29 @@

from pathlib import Path
import shutil
from typing import Dict, Iterable, List
import tempfile
import zipfile
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set
from urllib.parse import urlparse
from urllib.request import urlretrieve

import yaml

from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skill_metadata import SkillMetadata, parse_skill_metadata
from backend.agents.skill_loader import validate_skill
from backend.config.bootstrap_config import get_bootstrap_config_for_run

try:
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler, FileSystemEvent
    WATCHDOG_AVAILABLE = True
except ImportError:
    WATCHDOG_AVAILABLE = False
    Observer = None
    FileSystemEventHandler = object
    FileSystemEvent = object  # type: ignore[misc,assignment]


class SkillsManager:
    """Sync named skills into a run-scoped active skills workspace."""
@@ -178,6 +193,57 @@ class SkillsManager:
        )
        return skill_dir

    def install_external_skill_for_agent(
        self,
        config_name: str,
        agent_id: str,
        source: str,
        *,
        skill_name: str | None = None,
        activate: bool = True,
    ) -> Dict[str, object]:
        """
        Install an external skill into one agent's local skill space.

        Supports:
        - local skill directory containing SKILL.md
        - local zip archive containing one skill directory
        - http(s) URL to zip archive
        """
        source_path = self._resolve_external_source_path(source)
        skill_dir = self._resolve_external_skill_dir(source_path)
        metadata = parse_skill_metadata(skill_dir, source="external")
        final_name = _normalize_skill_name(skill_name or metadata.skill_name or skill_dir.name)
        if not final_name:
            raise ValueError("Could not determine skill name from external source.")

        target_dir = self.get_agent_local_root(config_name, agent_id) / final_name
        target_dir.parent.mkdir(parents=True, exist_ok=True)
        if target_dir.exists():
            shutil.rmtree(target_dir)
        shutil.copytree(skill_dir, target_dir)

        validation = validate_skill(target_dir)
        if not validation.get("valid", False):
            shutil.rmtree(target_dir, ignore_errors=True)
            raise ValueError(
                "Installed skill is invalid: "
                + "; ".join(validation.get("errors", []))
            )

        if activate:
            self.update_agent_skill_overrides(
                config_name=config_name,
                agent_id=agent_id,
                enable=[final_name],
            )
        return {
            "skill_name": final_name,
            "target_dir": str(target_dir),
            "activated": activate,
            "warnings": validation.get("warnings", []),
        }
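
A hypothetical call covering the URL form; the config name, agent id, skill name, and URL below are illustrative:

    result = skills_manager.install_external_skill_for_agent(
        config_name="smoke_fullstack",
        agent_id="technical_analyst",
        source="https://example.com/skills/momentum.zip",  # illustrative URL
        skill_name="momentum",  # optional override of the SKILL.md name
        activate=True,
    )
    # result: {"skill_name": ..., "target_dir": ..., "activated": True, "warnings": [...]}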

    def update_agent_local_skill(
        self,
        config_name: str,
@@ -239,6 +305,58 @@ class SkillsManager:
            "content": body,
        }

    def _resolve_external_source_path(self, source: str) -> Path:
        """Resolve source into a local path; download URL when needed."""
        parsed = urlparse(source)
        if parsed.scheme in {"http", "https"}:
            suffix = Path(parsed.path).suffix or ".zip"
            with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
                temp_path = Path(tmp.name)
            urlretrieve(source, temp_path)
            return temp_path
        return Path(source).expanduser().resolve()

    def _resolve_external_skill_dir(self, source_path: Path) -> Path:
        """Resolve external source path to a skill directory containing SKILL.md."""
        if not source_path.exists():
            raise FileNotFoundError(f"Source does not exist: {source_path}")

        if source_path.is_dir():
            if (source_path / "SKILL.md").exists():
                return source_path
            children = [
                item for item in source_path.iterdir()
                if item.is_dir() and (item / "SKILL.md").exists()
            ]
            if len(children) == 1:
                return children[0]
            raise ValueError(
                "Source directory must contain SKILL.md "
                "or exactly one child directory containing SKILL.md."
            )

        if source_path.suffix.lower() != ".zip":
            raise ValueError("External source file must be a .zip archive.")

        temp_root = Path(tempfile.mkdtemp(prefix="external_skill_"))
        with zipfile.ZipFile(source_path, "r") as archive:
            archive.extractall(temp_root)

        candidates = [
            item.parent
            for item in temp_root.rglob("SKILL.md")
            if item.is_file()
        ]
        unique = []
        for item in candidates:
            if item not in unique:
                unique.append(item)
        if len(unique) != 1:
            raise ValueError(
                "Zip archive must contain exactly one skill directory with SKILL.md."
            )
        return unique[0]

    def update_agent_skill_overrides(
        self,
        config_name: str,
@@ -500,6 +618,7 @@ class SkillsManager:
        self,
        config_name: str,
        agent_defaults: Dict[str, Iterable[str]],
        auto_reload: bool = False,
    ) -> Dict[str, List[Path]]:
        """Resolve all agent skills into per-agent installed/active workspaces."""
        resolved: Dict[str, List[str]] = {}
@@ -574,6 +693,9 @@ class SkillsManager:
            skill_sources=disabled_sources,
        )

        if auto_reload:
            self.watch_active_skills(config_name, agent_defaults)

        return active_map

    def _is_shared_skill(self, skill_name: str) -> bool:
@@ -583,6 +705,72 @@ class SkillsManager:
            return False
        return True

    def watch_active_skills(
        self,
        config_name: str,
        agent_defaults: Dict[str, Iterable[str]],
        callback: Optional[Any] = None,
    ) -> "_SkillsWatcher":
        """Start file system monitoring on active skill directories.

        Args:
            config_name: Run configuration name.
            agent_defaults: Map of agent_id -> default skill names.
            callback: Optional callable invoked on file changes with
                (changed_paths: List[Path]).

        Returns:
            A _SkillsWatcher instance. Call .stop() to halt monitoring.
        """
        if not WATCHDOG_AVAILABLE:
            raise ImportError(
                "watchdog is required for watch_active_skills. "
                "Install it with: pip install watchdog"
            )

        watched_paths: List[Path] = []
        for agent_id in agent_defaults:
            active_root = self.get_agent_active_root(config_name, agent_id)
            if active_root.exists():
                watched_paths.append(active_root)
            local_root = self.get_agent_local_root(config_name, agent_id)
            if local_root.exists():
                watched_paths.append(local_root)

        handler = _SkillsChangeHandler(watched_paths, callback)
        observer = Observer()
        for path in watched_paths:
            observer.schedule(handler, str(path), recursive=True)
        observer.start()
        return _SkillsWatcher(observer, handler)
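
A usage sketch; the agent ids and skill names are illustrative, and the watcher should be stopped explicitly when the run ends:

    watcher = skills_manager.watch_active_skills(
        config_name="smoke_fullstack",
        agent_defaults={"technical_analyst": ["momentum"]},  # illustrative
        callback=lambda paths: print("skill files changed:", paths),
    )
    try:
        run_pipeline()  # placeholder for the actual run loop
    finally:
        watcher.stop()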

    def reload_skills_if_changed(
        self,
        config_name: str,
        agent_defaults: Dict[str, Iterable[str]],
    ) -> Dict[str, List[Path]]:
        """Check for file changes and reload active skills if needed.

        Args:
            config_name: Run configuration name.
            agent_defaults: Map of agent_id -> default skill names.

        Returns:
            Map of agent_id -> list of reloaded skill paths, or empty dict
            if no changes were detected.
        """
        changed = self._pending_skill_changes.get(config_name)
        if not changed:
            return {}

        self._pending_skill_changes[config_name] = set()
        return self.prepare_active_skills(config_name, agent_defaults)

    # -------------------------------------------------------------------------
    # Internal change-tracking state (populated by _SkillsChangeHandler)
    # -------------------------------------------------------------------------
    _pending_skill_changes: Dict[str, Set[Path]] = {}

    def _resolve_disabled_skill_names(
        self,
        config_name: str,
@@ -613,6 +801,53 @@ class SkillsManager:
        ]


class _SkillsWatcher:
    """Handle returned by watch_active_skills; call .stop() to halt monitoring."""

    def __init__(self, observer: Observer, handler: "_SkillsChangeHandler") -> None:
        self._observer = observer
        self._handler = handler

    def stop(self) -> None:
        """Stop the file system observer."""
        self._observer.stop()
        self._observer.join()


class _SkillsChangeHandler(FileSystemEventHandler):
    """Collects file-change events on skill directories."""

    def __init__(
        self,
        watched_paths: List[Path],
        callback: Optional[Any] = None,
    ) -> None:
        super().__init__()
        self._watched_paths = watched_paths
        self._callback = callback

    def on_any_event(self, event: FileSystemEvent) -> None:
        if event.is_directory:
            return
        src_path = Path(event.src_path)
        for watched in self._watched_paths:
            if src_path.is_relative_to(watched):
                SkillsManager._pending_skill_changes.setdefault(
                    self._run_id_from_path(src_path), set()
                ).add(src_path)
                if self._callback:
                    self._callback([src_path])
                break

    @staticmethod
    def _run_id_from_path(path: Path) -> str:
        """Infer config_name from a path like runs/{config_name}/skills/active/..."""
        parts = path.parts
        for i, part in enumerate(parts):
            if part == "runs" and i + 1 < len(parts):
                return parts[i + 1]
        return "default"

def _dedupe_preserve_order(items: Iterable[str]) -> List[str]:
    result: List[str] = []
    for item in items:

@@ -5,7 +5,7 @@
and merges agent-specific tools.
"""

from typing import Any, Dict, Iterable, List, Optional
from typing import Any, Dict, Iterable, List, Optional, Set
from pathlib import Path

import yaml
@@ -13,6 +13,7 @@ import yaml

from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skills_manager import SkillsManager
from backend.agents.skill_loader import load_skill_from_dir, get_skill_tools
from backend.agents.skill_metadata import parse_skill_metadata
from backend.config.bootstrap_config import get_bootstrap_config_for_run

@@ -117,6 +118,26 @@ def _register_portfolio_tool_groups(toolkit: Any, pm_agent: Any) -> None:
        pm_agent._make_decision,
        group_name="portfolio_ops",
    )
    if hasattr(pm_agent, "_add_team_analyst"):
        toolkit.register_tool_function(
            pm_agent._add_team_analyst,
            group_name="portfolio_ops",
        )
    if hasattr(pm_agent, "_remove_team_analyst"):
        toolkit.register_tool_function(
            pm_agent._remove_team_analyst,
            group_name="portfolio_ops",
        )
    if hasattr(pm_agent, "_set_active_analysts"):
        toolkit.register_tool_function(
            pm_agent._set_active_analysts,
            group_name="portfolio_ops",
        )
    if hasattr(pm_agent, "_create_team_analyst"):
        toolkit.register_tool_function(
            pm_agent._create_team_analyst,
            group_name="portfolio_ops",
        )


def _register_risk_tool_groups(toolkit: Any) -> None:
@@ -223,6 +244,8 @@ def create_agent_toolkit(
    for skill_dir in active_skill_dirs:
        toolkit.register_agent_skill(str(skill_dir))

    apply_skill_tool_restrictions(toolkit, active_skill_dirs)

    if active_groups:
        toolkit.update_tool_groups(group_names=active_groups, active=True)

@@ -309,6 +332,8 @@ def create_toolkit_from_workspace(
    for skill_dir in skill_dirs:
        toolkit.register_agent_skill(str(skill_dir))

    apply_skill_tool_restrictions(toolkit, skill_dirs)

    # Activate the specified tool groups
    if active_groups is None:
        # Read from the configuration
@@ -397,3 +422,96 @@ def refresh_toolkit_skills(
    for skill_dir in sorted(local_root.iterdir()):
        if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
            toolkit.register_agent_skill(str(skill_dir))


def apply_skill_tool_restrictions(toolkit: Any, skill_dirs: List[Path]) -> None:
    """Apply per-skill allowed_tools / denied_tools restrictions to a toolkit.

    If a skill specifies allowed_tools, only those tools are accessible when
    that skill is active. If a skill specifies denied_tools, those tools are
    removed regardless of allowed_tools. Denied tools take precedence.

    This function annotates the toolkit with a _skill_tool_restrictions map
    that downstream code can consult when resolving available tools.

    Args:
        toolkit: The agentscope Toolkit instance.
        skill_dirs: List of skill directory paths to inspect.
    """
    restrictions: Dict[str, Dict[str, Set[str]]] = {}
    for skill_dir in skill_dirs:
        metadata = parse_skill_metadata(skill_dir, source="active")
        if not metadata.allowed_tools and not metadata.denied_tools:
            continue
        restrictions[skill_dir.name] = {
            "allowed": set(metadata.allowed_tools),
            "denied": set(metadata.denied_tools),
        }
    if hasattr(toolkit, "agent_skills"):
        for skill in toolkit.agent_skills:
            skill_name = getattr(skill, "name", "") or ""
            if skill_name in restrictions:
                setattr(
                    skill,
                    "_tool_allowed",
                    restrictions[skill_name]["allowed"],
                )
                setattr(
                    skill,
                    "_tool_denied",
                    restrictions[skill_name]["denied"],
                )


def get_skill_effective_tools(skill: Any) -> Optional[Set[str]]:
    """Return the effective tool set for a skill after applying restrictions.

    If the skill has no restrictions (no allowed_tools / denied_tools),
    returns None to indicate "all tools allowed".

    If allowed_tools is set, returns only those tools minus denied_tools.
    If only denied_tools is set, returns all tools minus denied_tools.

    Args:
        skill: A skill object previously registered via register_agent_skill.

    Returns:
        A set of allowed tool names, or None if unrestricted.
    """
    allowed = getattr(skill, "_tool_allowed", None)
    denied = getattr(skill, "_tool_denied", set())

    if allowed is None:
        return None

    effective = allowed - denied
    return effective
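
A worked example of the precedence rule, with hypothetical tool names; denied entries always win over allowed entries:

    allowed = {"get_prices", "get_news", "send_report"}  # hypothetical tools
    denied = {"send_report"}
    effective = allowed - denied  # {"get_prices", "get_news"}
    # Note: a skill with only denied_tools set has allowed=None here and is
    # treated as unrestricted by this helper.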


def filter_toolkit_by_skill(
    toolkit: Any,
    skill_name: str,
) -> Set[str]:
    """Return the set of tool names that are accessible for a given skill.

    Args:
        toolkit: The agentscope Toolkit instance.
        skill_name: Name of the skill to query.

    Returns:
        Set of allowed tool names, or all registered tool names if unrestricted.
    """
    if not hasattr(toolkit, "agent_skills"):
        return set()

    for skill in toolkit.agent_skills:
        name = getattr(skill, "name", "") or ""
        if name != skill_name:
            continue
        effective = get_skill_effective_tools(skill)
        if effective is None:
            return set()
        return effective

    return set()


@@ -1,12 +1,15 @@
# -*- coding: utf-8 -*-
"""Workspace Manager - Create and manage agent workspaces."""

import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

import yaml

logger = logging.getLogger(__name__)


@dataclass
class WorkspaceConfig:
@@ -123,9 +126,8 @@ class WorkspaceRegistry:
            with open(config_path, "r", encoding="utf-8") as f:
                data = yaml.safe_load(f) or {}
            workspaces.append(WorkspaceConfig.from_dict(data))
        except Exception:
            # Skip invalid workspace configs
            pass
        except Exception as e:
            logger.warning(f"Failed to load workspace config {config_path}: {e}")

    return workspaces

@@ -167,9 +169,8 @@ class WorkspaceRegistry:
                "agent_type": config.get("agent_type", "unknown"),
                "config_path": str(config_path),
            })
        except Exception:
            # Skip invalid agent configs
            pass
        except Exception as e:
            logger.warning(f"Failed to load agent config {config_path}: {e}")

    return agents

@@ -294,8 +295,8 @@ class WorkspaceRegistry:
        try:
            with open(config_path, "r", encoding="utf-8") as f:
                current_config = yaml.safe_load(f) or {}
        except Exception:
            pass
        except Exception as e:
            logger.warning(f"Failed to load existing config {config_path}: {e}")

        # Update fields
        if name is not None:

@@ -7,6 +7,7 @@ from typing import Dict, Iterable, Optional
import yaml

from .skills_manager import SkillsManager
from .team_pipeline_config import ensure_team_pipeline_config


class RunWorkspaceManager:
@@ -23,6 +24,16 @@ class RunWorkspaceManager:
        run_dir = self.get_run_dir(config_name)
        run_dir.mkdir(parents=True, exist_ok=True)
        self.skills_manager.ensure_activation_manifest(config_name)
        ensure_team_pipeline_config(
            project_root=self.project_root,
            config_name=config_name,
            default_analysts=[
                "fundamentals_analyst",
                "technical_analyst",
                "sentiment_analyst",
                "valuation_analyst",
            ],
        )
        bootstrap_path = run_dir / "BOOTSTRAP.md"
        if not bootstrap_path.exists():
            bootstrap_path.write_text(