feat(agent): complete EvoAgent integration for all 6 agent roles

Migrate all agent roles from Legacy to EvoAgent architecture:
- fundamentals_analyst, technical_analyst, sentiment_analyst, valuation_analyst
- risk_manager, portfolio_manager

Key changes:
- EvoAgent now supports Portfolio Manager compatibility methods (_make_decision,
  get_decisions, get_portfolio_state, load_portfolio_state, update_portfolio)
- Add UnifiedAgentFactory for centralized agent creation
- ToolGuard with batch approval API and WebSocket broadcast
- Legacy agents marked deprecated (AnalystAgent, RiskAgent, PMAgent)
- Remove backend/agents/compat.py migration shim
- Add run_id alongside workspace_id for semantic clarity
- Complete integration test coverage (13 tests)
- All smoke tests passing for 6 agent roles

Constraint: Must maintain backward compatibility with existing run configs
Constraint: Memory support must work with EvoAgent (no fallback to Legacy)
Rejected: separate PM implementation for EvoAgent — a unified approach is cleaner
Confidence: high
Scope-risk: broad
Directive: EVO_AGENT_IDS env var still respected but defaults to all roles
Not-tested: Kubernetes sandbox mode for skill execution
This commit is contained in:
2026-04-02 00:55:08 +08:00
parent 0fa413380c
commit 16b54d5ccc
73 changed files with 9454 additions and 904 deletions

View File

@@ -26,13 +26,45 @@ from backend.agents.team_pipeline_config import (
resolve_active_analysts,
update_active_analysts,
)
from backend.agents import AnalystAgent
from backend.agents import AnalystAgent, EvoAgent
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.toolkit_factory import create_agent_toolkit
from backend.agents.workspace_manager import WorkspaceManager
from backend.agents.prompt_loader import get_prompt_loader
from backend.llm.models import get_agent_formatter, get_agent_model
from backend.config.constants import ANALYST_TYPES
def _resolve_evo_agent_ids() -> set[str]:
    """Return agent ids selected to use EvoAgent.

    By default, all supported roles use EvoAgent.
    EVO_AGENT_IDS can be used to limit to specific roles.

    Supported roles:
    - analyst roles (fundamentals, technical, sentiment, valuation)
    - risk_manager
    - portfolio_manager

    Example:
    EVO_AGENT_IDS=fundamentals_analyst,risk_manager,portfolio_manager
    """
    supported = set(ANALYST_TYPES) | {"risk_manager", "portfolio_manager"}
    raw = os.getenv("EVO_AGENT_IDS", "")
    if not raw.strip():
        # Default: all supported roles use EvoAgent
        return supported
    requested = {token.strip() for token in raw.split(",") if token.strip()}
    # Unknown ids are silently dropped; only supported roles may opt in.
    return requested & supported
# Team infrastructure imports (graceful import - may not exist yet)
try:
from backend.agents.team.team_coordinator import TeamCoordinator
@@ -140,6 +172,10 @@ class TradingPipeline:
session_key = TradingSessionKey(date=date).key()
self._session_key = session_key
active_analysts = self._get_active_analysts()
self._sync_agent_runtime_context(
agents=active_analysts + [self.risk_manager, self.pm],
session_key=session_key,
)
if self.runtime_manager:
self.runtime_manager.set_session_key(session_key)
self._runtime_log_event("cycle:start", {"tickers": tickers, "date": date})
@@ -1488,108 +1524,6 @@ class TradingPipeline:
return "Decisions: " + "; ".join(decision_texts)
return "Portfolio analysis completed. No trades recommended."
def load_agents_from_workspace(
    self,
    workspace_id: str,
    agent_factory: Optional[Any] = None,
) -> Dict[str, Any]:
    """
    Load agents from workspace using AgentFactory.

    This method supports the new EvoAgent architecture by loading
    agents from a workspace instead of using hardcoded agents.

    Args:
        workspace_id: Workspace identifier
        agent_factory: Optional AgentFactory instance (uses self.agent_factory if None)

    Returns:
        Dictionary with loaded agents:
        {
            "analysts": List[EvoAgent],
            "risk_manager": EvoAgent,
            "portfolio_manager": EvoAgent,
        }

    Raises:
        ValueError: If workspace doesn't exist or no agents found
    """
    factory = agent_factory or self.agent_factory
    if factory is None:
        from backend.agents import AgentFactory
        factory = AgentFactory()

    # Check workspace exists
    if not factory.workspaces_root.exists():
        raise ValueError(f"Workspaces root does not exist: {factory.workspaces_root}")

    workspace_dir = factory.workspaces_root / workspace_id
    if not workspace_dir.exists():
        raise ValueError(f"Workspace '{workspace_id}' does not exist")

    # Load agents from workspace
    agents_data = factory.list_agents(workspace_id=workspace_id)
    if not agents_data:
        raise ValueError(f"No agents found in workspace '{workspace_id}'")

    # Categorize agents by type
    analysts = []
    risk_manager = None
    portfolio_manager = None

    for agent_data in agents_data:
        agent_type = agent_data.get("agent_type", "unknown")
        agent_id = agent_data.get("agent_id")

        # Load full agent configuration.
        # BUG FIX: Path("") normalizes to Path("."), and Path(".").exists()
        # is True, so an entry with an empty/missing config_path used to be
        # loaded anyway. Require a non-empty path before the existence check.
        raw_config_path = agent_data.get("config_path", "")
        if raw_config_path and Path(raw_config_path).exists():
            agent = factory.load_agent(agent_id, workspace_id)
            if agent_type.endswith("_analyst"):
                analysts.append(agent)
            elif agent_type == "risk_manager":
                risk_manager = agent
            elif agent_type == "portfolio_manager":
                portfolio_manager = agent

    if not analysts:
        raise ValueError(f"No analysts found in workspace '{workspace_id}'")
    if risk_manager is None:
        raise ValueError(f"No risk_manager found in workspace '{workspace_id}'")
    if portfolio_manager is None:
        raise ValueError(f"No portfolio_manager found in workspace '{workspace_id}'")

    return {
        "analysts": analysts,
        "risk_manager": risk_manager,
        "portfolio_manager": portfolio_manager,
    }
def reload_agents_from_workspace(self, workspace_id: Optional[str] = None) -> None:
    """
    Reload all agents from workspace.

    This updates self.analysts, self.risk_manager, and self.pm
    with agents loaded from the specified workspace.

    Args:
        workspace_id: Workspace ID (uses self.workspace_id if None)
    """
    ws_id = workspace_id or self.workspace_id
    if not ws_id:
        raise ValueError("No workspace_id specified")

    loaded = self.load_agents_from_workspace(ws_id)

    # Swap in the freshly loaded agent set and remember the workspace.
    self.analysts, self.risk_manager, self.pm = (
        loaded["analysts"],
        loaded["risk_manager"],
        loaded["portfolio_manager"],
    )
    self.workspace_id = ws_id
    logger.info(f"Reloaded {len(self.analysts)} analysts from workspace '{ws_id}'")
def _runtime_update_status(self, agent: Any, status: str) -> None:
if not self.runtime_manager:
return
@@ -1602,6 +1536,28 @@ class TradingPipeline:
for agent in agents:
self._runtime_update_status(agent, status)
def _sync_agent_runtime_context(
self,
agents: List[Any],
session_key: str,
) -> None:
"""Propagate run/session identifiers onto agent instances.
EvoAgent's tool-guard approval records depend on workspace/session
context being present on the agent object at runtime.
"""
config_name = getattr(self.pm, "config", {}).get("config_name", "default")
for agent in agents:
try:
setattr(agent, "session_id", session_key)
if not getattr(agent, "run_id", None):
setattr(agent, "run_id", config_name)
# Keep workspace_id for backward compatibility
if not getattr(agent, "workspace_id", None):
setattr(agent, "workspace_id", config_name)
except Exception:
continue
def _all_analysts(self) -> List[Any]:
"""Return static analysts plus runtime-created analysts."""
return list(self.analysts) + list(self._dynamic_analysts.values())
@@ -1630,18 +1586,46 @@ class TradingPipeline:
),
)
agent = AnalystAgent(
analyst_type=analyst_type,
toolkit=create_agent_toolkit(
# Determine whether to use EvoAgent based on EVO_AGENT_IDS
use_evo_agent = analyst_type in _resolve_evo_agent_ids()
if use_evo_agent:
from backend.agents.skills_manager import SkillsManager
skills_manager = SkillsManager(project_root=project_root)
workspace_dir = skills_manager.get_agent_asset_dir(
config_name,
agent_id,
)
agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
agent = EvoAgent(
agent_id=agent_id,
config_name=config_name,
workspace_dir=workspace_dir,
model=get_agent_model(analyst_type),
formatter=get_agent_formatter(analyst_type),
prompt_files=agent_config.prompt_files,
)
agent.toolkit = create_agent_toolkit(
agent_id=agent_id,
config_name=config_name,
active_skill_dirs=[],
),
model=get_agent_model(analyst_type),
formatter=get_agent_formatter(analyst_type),
agent_id=agent_id,
config={"config_name": config_name},
)
)
setattr(agent, "run_id", config_name)
# Keep workspace_id for backward compatibility
setattr(agent, "workspace_id", config_name)
else:
agent = AnalystAgent(
analyst_type=analyst_type,
toolkit=create_agent_toolkit(
agent_id=agent_id,
config_name=config_name,
active_skill_dirs=[],
),
model=get_agent_model(analyst_type),
formatter=get_agent_formatter(analyst_type),
agent_id=agent_id,
config={"config_name": config_name},
)
self._dynamic_analysts[agent_id] = agent
update_active_analysts(
project_root=project_root,

View File

@@ -12,9 +12,10 @@ import asyncio
import os
from contextlib import AsyncExitStack
from pathlib import Path
from typing import Any, Dict, Optional, Callable
from typing import Any, Dict, List, Optional, Callable
from backend.agents import AnalystAgent, PMAgent, RiskAgent
from backend.agents import AnalystAgent, EvoAgent, PMAgent, RiskAgent
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skills_manager import SkillsManager
from backend.agents.toolkit_factory import create_agent_toolkit, load_agent_profiles
from backend.agents.prompt_loader import get_prompt_loader
@@ -41,6 +42,9 @@ _prompt_loader = get_prompt_loader()
# Global gateway reference for cleanup
_gateway_instance: Optional[Gateway] = None
# Global long-term memory references for persistence
_long_term_memories: List[Any] = []
def _set_gateway(gateway: Optional[Gateway]) -> None:
"""Set global gateway reference."""
@@ -61,6 +65,101 @@ def stop_gateway() -> None:
_gateway_instance = None
def _set_long_term_memories(memories: List[Any]) -> None:
    """Set global long-term memory references.

    Stores the list by reference so the shutdown persistence hooks
    can flush every memory instance before the process exits.
    """
    global _long_term_memories
    _long_term_memories = memories
def _clear_long_term_memories() -> None:
    """Clear global long-term memory references.

    Called after cleanup so the module no longer keeps memory
    instances alive once persistence has run.
    """
    global _long_term_memories
    _long_term_memories = []
def _persist_long_term_memories_sync() -> None:
    """
    Synchronously persist all long-term memories before shutdown.

    This function ensures all memory data is flushed to disk/vector store
    before the process exits. Should be called during cleanup.

    Each memory is probed for `sync` (preferred, synchronous), `save`
    (async coroutine), and `flush` methods; failures are logged per
    instance and never abort the loop.
    """
    global _long_term_memories
    if not _long_term_memories:
        return

    import logging
    logger = logging.getLogger(__name__)
    logger.info(f"[MemoryPersistence] Persisting {len(_long_term_memories)} memory instances...")

    for i, memory in enumerate(_long_term_memories):
        try:
            # Try to save memory if it has a save method
            # NOTE(review): a memory exposing sync() but no save() is never
            # synced here — confirm that combination cannot occur.
            if hasattr(memory, 'save') and callable(getattr(memory, 'save')):
                if hasattr(memory, 'sync') and callable(getattr(memory, 'sync')):
                    # Use sync version if available
                    memory.sync()
                    logger.debug(f"[MemoryPersistence] Synced memory {i}")
                else:
                    # save() is a coroutine; FIX: use get_running_loop()/
                    # asyncio.run() instead of the deprecated
                    # get_event_loop() in a non-async context.
                    import asyncio
                    try:
                        loop = asyncio.get_running_loop()
                    except RuntimeError:
                        loop = None
                    if loop is not None:
                        # NOTE(review): a task scheduled during shutdown may
                        # never complete before the process exits.
                        loop.create_task(memory.save())
                        logger.debug(f"[MemoryPersistence] Scheduled save for memory {i}")
                    else:
                        asyncio.run(memory.save())
                        logger.debug(f"[MemoryPersistence] Saved memory {i}")
            # Try to flush any pending writes
            if hasattr(memory, 'flush') and callable(getattr(memory, 'flush')):
                memory.flush()
                logger.debug(f"[MemoryPersistence] Flushed memory {i}")
        except Exception as e:
            logger.warning(f"[MemoryPersistence] Failed to persist memory {i}: {e}")

    logger.info("[MemoryPersistence] Memory persistence complete")
async def _persist_long_term_memories_async() -> None:
    """
    Asynchronously persist all long-term memories.

    This is the preferred method for persisting memories when
    an async context is available.
    """
    global _long_term_memories
    if not _long_term_memories:
        return

    import logging
    logger = logging.getLogger(__name__)
    logger.info(f"[MemoryPersistence] Persisting {len(_long_term_memories)} memory instances async...")

    for i, memory in enumerate(_long_term_memories):
        try:
            # Await the coroutine save() when the memory provides one.
            save_fn = getattr(memory, 'save', None)
            if callable(save_fn):
                await save_fn()
                logger.debug(f"[MemoryPersistence] Saved memory {i} (async)")
            # Flush any buffered writes when supported.
            flush_fn = getattr(memory, 'flush', None)
            if callable(flush_fn):
                flush_fn()
                logger.debug(f"[MemoryPersistence] Flushed memory {i}")
        except Exception as e:
            logger.warning(f"[MemoryPersistence] Failed to persist memory {i}: {e}")

    logger.info("[MemoryPersistence] Async memory persistence complete")
def create_long_term_memory(agent_name: str, run_id: str, run_dir: Path):
"""Create ReMeTaskLongTermMemory for an agent."""
try:
@@ -96,6 +195,179 @@ def create_long_term_memory(agent_name: str, run_id: str, run_dir: Path):
)
def _resolve_evo_agent_ids() -> set[str]:
    """Return agent ids selected to use EvoAgent.

    By default, all supported roles use EvoAgent.
    """
    supported = set(ANALYST_TYPES) | {"risk_manager", "portfolio_manager"}
    raw = os.getenv("EVO_AGENT_IDS", "")
    if not raw.strip():
        # Default: all supported roles use EvoAgent
        return supported
    requested = {token.strip() for token in raw.split(",") if token.strip()}
    # Unknown ids are silently dropped; only supported roles may opt in.
    return requested & supported
def _create_analyst_agent(
    *,
    analyst_type: str,
    run_id: str,
    model,
    formatter,
    skills_manager: SkillsManager,
    active_skill_map: Dict[str, list[Path]],
    long_term_memory=None,
):
    """Create one analyst agent, optionally using EvoAgent.

    Args:
        analyst_type: Analyst role id (e.g. "fundamentals_analyst").
        run_id: Run/config identifier, also used as the toolkit config name.
        model: LLM model handle for this agent.
        formatter: Output formatter for this agent.
        skills_manager: Source of per-agent workspace/asset directories.
        active_skill_map: Mapping of agent id -> active skill directories.
        long_term_memory: Optional long-term memory instance.

    Returns:
        An EvoAgent when the role is selected by _resolve_evo_agent_ids(),
        otherwise a Legacy AnalystAgent.
    """
    active_skill_dirs = active_skill_map.get(analyst_type, [])
    toolkit = create_agent_toolkit(
        analyst_type,
        run_id,
        active_skill_dirs=active_skill_dirs,
    )

    use_evo_agent = analyst_type in _resolve_evo_agent_ids()
    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(run_id, analyst_type)
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id=analyst_type,
            config_name=run_id,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            long_term_memory=long_term_memory,
        )
        agent.toolkit = toolkit
        # CONSISTENCY FIX: stamp run_id alongside workspace_id, matching the
        # pipeline-side EvoAgent setup; workspace_id kept for backward compat.
        setattr(agent, "run_id", run_id)
        setattr(agent, "workspace_id", run_id)
        return agent

    return AnalystAgent(
        analyst_type=analyst_type,
        toolkit=toolkit,
        model=model,
        formatter=formatter,
        agent_id=analyst_type,
        config={"config_name": run_id},
        long_term_memory=long_term_memory,
    )
def _create_risk_manager_agent(
    *,
    run_id: str,
    model,
    formatter,
    skills_manager: SkillsManager,
    active_skill_map: Dict[str, list[Path]],
    long_term_memory=None,
):
    """Create the risk manager, optionally using EvoAgent.

    Args:
        run_id: Run/config identifier, also used as the toolkit config name.
        model: LLM model handle for this agent.
        formatter: Output formatter for this agent.
        skills_manager: Source of per-agent workspace/asset directories.
        active_skill_map: Mapping of agent id -> active skill directories.
        long_term_memory: Optional long-term memory instance.

    Returns:
        An EvoAgent when "risk_manager" is selected by
        _resolve_evo_agent_ids(), otherwise a Legacy RiskAgent.
    """
    active_skill_dirs = active_skill_map.get("risk_manager", [])
    toolkit = create_agent_toolkit(
        "risk_manager",
        run_id,
        active_skill_dirs=active_skill_dirs,
    )

    use_evo_agent = "risk_manager" in _resolve_evo_agent_ids()
    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(run_id, "risk_manager")
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id="risk_manager",
            config_name=run_id,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            long_term_memory=long_term_memory,
        )
        agent.toolkit = toolkit
        # CONSISTENCY FIX: stamp run_id alongside workspace_id, matching the
        # pipeline-side EvoAgent setup; workspace_id kept for backward compat.
        setattr(agent, "run_id", run_id)
        setattr(agent, "workspace_id", run_id)
        return agent

    return RiskAgent(
        model=model,
        formatter=formatter,
        name="risk_manager",
        config={"config_name": run_id},
        long_term_memory=long_term_memory,
        toolkit=toolkit,
    )
def _create_portfolio_manager_agent(
    *,
    run_id: str,
    model,
    formatter,
    initial_cash: float,
    margin_requirement: float,
    skills_manager: SkillsManager,
    active_skill_map: Dict[str, list[Path]],
    long_term_memory=None,
):
    """Create the portfolio manager, optionally using EvoAgent.

    Args:
        run_id: Run/config identifier, also used as the toolkit config name.
        model: LLM model handle for this agent.
        formatter: Output formatter for this agent.
        initial_cash: Starting cash balance for the portfolio.
        margin_requirement: Margin requirement applied to positions.
        skills_manager: Source of per-agent workspace/asset directories.
        active_skill_map: Mapping of agent id -> active skill directories.
        long_term_memory: Optional long-term memory instance.

    Returns:
        An EvoAgent when "portfolio_manager" is selected by
        _resolve_evo_agent_ids(), otherwise a Legacy PMAgent.
    """
    active_skill_dirs = active_skill_map.get("portfolio_manager", [])

    use_evo_agent = "portfolio_manager" in _resolve_evo_agent_ids()
    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(
            run_id,
            "portfolio_manager",
        )
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id="portfolio_manager",
            config_name=run_id,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            initial_cash=initial_cash,
            margin_requirement=margin_requirement,
            long_term_memory=long_term_memory,
        )
        # Unlike the other roles, the PM toolkit is built with owner=agent —
        # presumably so portfolio tools can reach back into the agent; confirm
        # against create_agent_toolkit.
        agent.toolkit = create_agent_toolkit(
            "portfolio_manager",
            run_id,
            owner=agent,
            active_skill_dirs=active_skill_dirs,
        )
        # CONSISTENCY FIX: stamp run_id alongside workspace_id, matching the
        # pipeline-side EvoAgent setup; workspace_id kept for backward compat.
        setattr(agent, "run_id", run_id)
        setattr(agent, "workspace_id", run_id)
        return agent

    return PMAgent(
        name="portfolio_manager",
        model=model,
        formatter=formatter,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        config={"config_name": run_id},
        long_term_memory=long_term_memory,
        toolkit_factory=create_agent_toolkit,
        toolkit_factory_kwargs={
            "active_skill_dirs": active_skill_dirs,
        },
    )
def create_agents(
run_id: str,
run_dir: Path,
@@ -129,11 +401,6 @@ def create_agents(
for analyst_type in ANALYST_TYPES:
model = get_agent_model(analyst_type)
formatter = get_agent_formatter(analyst_type)
toolkit = create_agent_toolkit(
analyst_type,
run_id,
active_skill_dirs=active_skill_map.get(analyst_type, []),
)
long_term_memory = None
if enable_long_term_memory:
@@ -141,13 +408,13 @@ def create_agents(
if long_term_memory:
long_term_memories.append(long_term_memory)
analyst = AnalystAgent(
analyst = _create_analyst_agent(
analyst_type=analyst_type,
toolkit=toolkit,
run_id=run_id,
model=model,
formatter=formatter,
agent_id=analyst_type,
config={"config_name": run_id},
skills_manager=skills_manager,
active_skill_map=active_skill_map,
long_term_memory=long_term_memory,
)
analysts.append(analyst)
@@ -159,17 +426,13 @@ def create_agents(
if risk_long_term_memory:
long_term_memories.append(risk_long_term_memory)
risk_manager = RiskAgent(
risk_manager = _create_risk_manager_agent(
run_id=run_id,
model=get_agent_model("risk_manager"),
formatter=get_agent_formatter("risk_manager"),
name="risk_manager",
config={"config_name": run_id},
skills_manager=skills_manager,
active_skill_map=active_skill_map,
long_term_memory=risk_long_term_memory,
toolkit=create_agent_toolkit(
"risk_manager",
run_id,
active_skill_dirs=active_skill_map.get("risk_manager", []),
),
)
# Create portfolio manager
@@ -179,18 +442,15 @@ def create_agents(
if pm_long_term_memory:
long_term_memories.append(pm_long_term_memory)
portfolio_manager = PMAgent(
name="portfolio_manager",
portfolio_manager = _create_portfolio_manager_agent(
run_id=run_id,
model=get_agent_model("portfolio_manager"),
formatter=get_agent_formatter("portfolio_manager"),
initial_cash=initial_cash,
margin_requirement=margin_requirement,
config={"config_name": run_id},
skills_manager=skills_manager,
active_skill_map=active_skill_map,
long_term_memory=pm_long_term_memory,
toolkit_factory=create_agent_toolkit,
toolkit_factory_kwargs={
"active_skill_dirs": active_skill_map.get("portfolio_manager", []),
},
)
return analysts, risk_manager, portfolio_manager, long_term_memories
@@ -400,6 +660,9 @@ async def run_pipeline(
)
_set_gateway(gateway)
# Set global memory references for persistence
_set_long_term_memories(long_term_memories)
# Start pipeline execution
async with AsyncExitStack() as stack:
# Enter long-term memory contexts
@@ -467,6 +730,12 @@ async def run_pipeline(
# Cleanup
logger.info("[Pipeline] Cleaning up...")
# Persist long-term memories before cleanup
try:
await _persist_long_term_memories_async()
except Exception as e:
logger.warning(f"[Pipeline] Memory persistence error: {e}")
# Stop Gateway
try:
stop_gateway()
@@ -474,6 +743,9 @@ async def run_pipeline(
except Exception as e:
logger.error(f"[Pipeline] Error stopping gateway: {e}")
# Clear memory references
_clear_long_term_memories()
clear_shutdown_event()
clear_global_runtime_manager()
from backend.api.runtime import unregister_runtime_manager

View File

@@ -463,6 +463,34 @@ class StateSync:
limit=self.storage.max_feed_history,
) or self._state.get("last_day_history", [])
persisted_state = self.storage.read_persisted_server_state()
dashboard_snapshot = (
self.storage.build_dashboard_snapshot_from_state(self._state)
if include_dashboard
else None
)
dashboard_holdings = (
dashboard_snapshot.get("holdings", [])
if dashboard_snapshot is not None
else self._state.get("holdings", [])
)
dashboard_trades = (
dashboard_snapshot.get("trades", [])
if dashboard_snapshot is not None
else self._state.get("trades", [])
)
dashboard_stats = (
dashboard_snapshot.get("stats", {})
if dashboard_snapshot is not None
else self._state.get("stats", {})
)
dashboard_leaderboard = (
dashboard_snapshot.get("leaderboard", [])
if dashboard_snapshot is not None
else self._state.get("leaderboard", [])
)
portfolio_state = self._state.get("portfolio") or persisted_state.get("portfolio") or {}
payload = {
"server_mode": self._state.get("server_mode", "live"),
"is_backtest": self._state.get("is_backtest", False),
@@ -476,24 +504,23 @@ class StateSync:
"trading_days_completed",
0,
),
"holdings": self._state.get("holdings", []),
"trades": self._state.get("trades", []),
"stats": self._state.get("stats", {}),
"leaderboard": self._state.get("leaderboard", []),
"portfolio": self._state.get("portfolio", {}),
"holdings": dashboard_holdings,
"trades": dashboard_trades,
"stats": dashboard_stats,
"leaderboard": dashboard_leaderboard,
"portfolio": portfolio_state,
"realtime_prices": self._state.get("realtime_prices", {}),
"data_sources": self._state.get("data_sources", {}),
"price_history": self._state.get("price_history", {}),
}
if include_dashboard:
dashboard_snapshot = self.storage.build_dashboard_snapshot_from_state(self._state)
payload["dashboard"] = {
"summary": dashboard_snapshot.get("summary"),
"holdings": dashboard_snapshot.get("holdings"),
"stats": dashboard_snapshot.get("stats"),
"trades": dashboard_snapshot.get("trades"),
"leaderboard": dashboard_snapshot.get("leaderboard"),
"holdings": dashboard_holdings,
"stats": dashboard_stats,
"trades": dashboard_trades,
"leaderboard": dashboard_leaderboard,
}
return payload