feat(agent): complete EvoAgent integration for all 6 agent roles
Migrate all agent roles from Legacy to EvoAgent architecture: - fundamentals_analyst, technical_analyst, sentiment_analyst, valuation_analyst - risk_manager, portfolio_manager Key changes: - EvoAgent now supports Portfolio Manager compatibility methods (_make_decision, get_decisions, get_portfolio_state, load_portfolio_state, update_portfolio) - Add UnifiedAgentFactory for centralized agent creation - ToolGuard with batch approval API and WebSocket broadcast - Legacy agents marked deprecated (AnalystAgent, RiskAgent, PMAgent) - Remove backend/agents/compat.py migration shim - Add run_id alongside workspace_id for semantic clarity - Complete integration test coverage (13 tests) - All smoke tests passing for 6 agent roles Constraint: Must maintain backward compatibility with existing run configs Constraint: Memory support must work with EvoAgent (no fallback to Legacy) Rejected: Separate PM implementation for EvoAgent | unified approach cleaner Confidence: high Scope-risk: broad Directive: EVO_AGENT_IDS env var still respected but defaults to all roles Not-tested: Kubernetes sandbox mode for skill execution
This commit is contained in:
@@ -28,6 +28,19 @@ def test_agent_service_excludes_runtime_routes(tmp_path):
|
||||
assert "/api/runtime/gateway/port" not in paths
|
||||
|
||||
|
||||
def test_agent_service_status_includes_scope_metadata(tmp_path):
|
||||
app = create_app(project_root=tmp_path)
|
||||
|
||||
with TestClient(app) as client:
|
||||
response = client.get("/api/status")
|
||||
|
||||
assert response.status_code == 200
|
||||
payload = response.json()
|
||||
assert payload["scope"]["design_time_registry"]["root"] == str(tmp_path / "workspaces")
|
||||
assert payload["scope"]["runtime_assets"]["root"] == str(tmp_path / "runs")
|
||||
assert "runs/<run_id>" in payload["scope"]["agent_route_note"]
|
||||
|
||||
|
||||
def test_agent_service_read_routes(monkeypatch, tmp_path):
|
||||
class _FakeSkillsManager:
|
||||
project_root = tmp_path
|
||||
@@ -96,9 +109,14 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
|
||||
|
||||
assert profile.status_code == 200
|
||||
assert profile.json()["profile"]["model_name"] == "deepseek-v3.2"
|
||||
assert profile.json()["scope_type"] == "runtime_run"
|
||||
assert skills.status_code == 200
|
||||
assert skills.json()["skills"][0]["skill_name"] == "demo_skill"
|
||||
assert skills.json()["scope_type"] == "runtime_run"
|
||||
assert detail.status_code == 200
|
||||
assert detail.json()["skill"]["content"] == "# demo"
|
||||
assert detail.json()["scope_type"] == "runtime_run"
|
||||
assert workspace_file.status_code == 200
|
||||
assert workspace_file.json()["content"] == "demo:portfolio_manager:MEMORY.md"
|
||||
assert workspace_file.json()["scope_type"] == "runtime_run"
|
||||
assert "runs/<run_id>" in workspace_file.json()["scope_note"]
|
||||
|
||||
@@ -311,7 +311,7 @@ class TestRiskAgent:
|
||||
|
||||
|
||||
class TestStorageService:
|
||||
def test_storage_service_defaults_to_live_config(self):
|
||||
def test_storage_service_defaults_to_runtime_config(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
@@ -320,7 +320,7 @@ class TestStorageService:
|
||||
initial_cash=100000.0,
|
||||
)
|
||||
|
||||
assert storage.config_name == "live"
|
||||
assert storage.config_name == "runtime"
|
||||
|
||||
def test_calculate_portfolio_value_cash_only(self):
|
||||
from backend.services.storage import StorageService
|
||||
@@ -404,7 +404,7 @@ class TestStorageService:
|
||||
assert trades[0]["qty"] == 50
|
||||
assert trades[0]["price"] == 200.0
|
||||
|
||||
def test_generate_summary(self):
|
||||
def test_build_summary_export(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
@@ -424,13 +424,12 @@ class TestStorageService:
|
||||
}
|
||||
prices = {"AAPL": 500.0}
|
||||
|
||||
storage._generate_summary(state, 100000.0, prices)
|
||||
summary = storage._build_summary_export(state, 100000.0, prices)
|
||||
|
||||
summary = storage.load_file("summary")
|
||||
assert summary["totalAssetValue"] == 100000.0
|
||||
assert summary["totalReturn"] == 0.0
|
||||
|
||||
def test_generate_holdings(self):
|
||||
def test_build_holdings_export(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
@@ -448,9 +447,8 @@ class TestStorageService:
|
||||
}
|
||||
prices = {"AAPL": 500.0}
|
||||
|
||||
storage._generate_holdings(state, prices)
|
||||
holdings = storage._build_holdings_export(state, prices)
|
||||
|
||||
holdings = storage.load_file("holdings")
|
||||
assert len(holdings) == 2 # AAPL + CASH
|
||||
|
||||
aapl_holding = next(
|
||||
@@ -461,6 +459,150 @@ class TestStorageService:
|
||||
assert aapl_holding["quantity"] == 100
|
||||
assert aapl_holding["currentPrice"] == 500.0
|
||||
|
||||
def test_export_dashboard_compatibility_files_writes_expected_exports(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
storage = StorageService(
|
||||
dashboard_dir=Path(tmpdir) / "team_dashboard",
|
||||
initial_cash=100000.0,
|
||||
)
|
||||
state = {
|
||||
"portfolio_state": {
|
||||
"cash": 90000.0,
|
||||
"positions": {"AAPL": {"long": 50, "short": 0}},
|
||||
"margin_used": 0.0,
|
||||
},
|
||||
"equity_history": [{"t": 1000, "v": 100000}],
|
||||
"baseline_history": [{"t": 1000, "v": 100000}],
|
||||
"baseline_vw_history": [{"t": 1000, "v": 100000}],
|
||||
"momentum_history": [{"t": 1000, "v": 100000}],
|
||||
"all_trades": [
|
||||
{
|
||||
"id": "t1",
|
||||
"ts": 1000,
|
||||
"trading_date": "2024-01-15",
|
||||
"side": "LONG",
|
||||
"ticker": "AAPL",
|
||||
"qty": 50,
|
||||
"price": 200.0,
|
||||
}
|
||||
],
|
||||
}
|
||||
prices = {"AAPL": 200.0}
|
||||
|
||||
storage.export_dashboard_compatibility_files(
|
||||
state,
|
||||
net_value=100000.0,
|
||||
prices=prices,
|
||||
)
|
||||
|
||||
assert storage.load_export_file("summary")["totalAssetValue"] == 100000.0
|
||||
holdings = storage.load_export_file("holdings")
|
||||
assert any(item["ticker"] == "AAPL" for item in holdings)
|
||||
assert storage.load_export_file("stats")["totalTrades"] == 1
|
||||
assert storage.load_export_file("trades")[0]["ticker"] == "AAPL"
|
||||
|
||||
def test_build_dashboard_snapshot_prefers_persisted_runtime_state_when_memory_view_is_sparse(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
dashboard_dir = Path(tmpdir) / "team_dashboard"
|
||||
storage = StorageService(
|
||||
dashboard_dir=dashboard_dir,
|
||||
initial_cash=100000.0,
|
||||
)
|
||||
storage.save_server_state(
|
||||
{
|
||||
"portfolio": {
|
||||
"total_value": 123456.0,
|
||||
"cash": 45678.0,
|
||||
"pnl_percent": 23.45,
|
||||
},
|
||||
"holdings": [{"ticker": "AAPL", "quantity": 10}],
|
||||
"stats": {"totalTrades": 3},
|
||||
"trades": [{"ticker": "AAPL"}],
|
||||
"leaderboard": [{"agentId": "technical_analyst"}],
|
||||
}
|
||||
)
|
||||
|
||||
snapshot = storage.build_dashboard_snapshot_from_state({"portfolio": {}})
|
||||
|
||||
assert snapshot["summary"]["totalAssetValue"] == 123456.0
|
||||
assert snapshot["holdings"][0]["ticker"] == "AAPL"
|
||||
assert snapshot["trades"][0]["ticker"] == "AAPL"
|
||||
assert snapshot["leaderboard"][0]["agentId"] == "technical_analyst"
|
||||
|
||||
def test_runtime_leaderboard_prefers_server_state_and_persists_back(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
dashboard_dir = Path(tmpdir) / "team_dashboard"
|
||||
storage = StorageService(
|
||||
dashboard_dir=dashboard_dir,
|
||||
initial_cash=100000.0,
|
||||
)
|
||||
storage.save_export_file("leaderboard", [{"agentId": "export_only"}])
|
||||
storage.save_server_state({"leaderboard": [{"agentId": "runtime_state"}]})
|
||||
|
||||
leaderboard = storage.load_runtime_leaderboard()
|
||||
assert leaderboard[0]["agentId"] == "runtime_state"
|
||||
|
||||
updated = [{"agentId": "updated_runtime"}]
|
||||
storage.persist_runtime_leaderboard(updated)
|
||||
|
||||
saved_state = storage.read_persisted_server_state()
|
||||
saved_export = storage.load_export_file("leaderboard")
|
||||
assert saved_state["leaderboard"][0]["agentId"] == "updated_runtime"
|
||||
assert saved_export[0]["agentId"] == "updated_runtime"
|
||||
|
||||
def test_compatibility_exports_can_be_disabled_without_breaking_runtime_leaderboard(self):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
dashboard_dir = Path(tmpdir) / "team_dashboard"
|
||||
storage = StorageService(
|
||||
dashboard_dir=dashboard_dir,
|
||||
initial_cash=100000.0,
|
||||
enable_compat_exports=False,
|
||||
)
|
||||
|
||||
storage.generate_leaderboard()
|
||||
storage.export_dashboard_compatibility_files(
|
||||
{
|
||||
"portfolio_state": {
|
||||
"cash": 100000.0,
|
||||
"positions": {},
|
||||
"margin_used": 0.0,
|
||||
},
|
||||
"equity_history": [],
|
||||
"baseline_history": [],
|
||||
"baseline_vw_history": [],
|
||||
"momentum_history": [],
|
||||
"all_trades": [],
|
||||
},
|
||||
net_value=100000.0,
|
||||
prices={},
|
||||
)
|
||||
|
||||
assert not dashboard_dir.joinpath("summary.json").exists()
|
||||
assert storage.load_runtime_leaderboard()
|
||||
persisted = storage.read_persisted_server_state()
|
||||
assert persisted["leaderboard"]
|
||||
|
||||
def test_compatibility_exports_default_can_be_disabled_via_env(self, monkeypatch):
|
||||
from backend.services.storage import StorageService
|
||||
|
||||
monkeypatch.setenv("ENABLE_DASHBOARD_COMPAT_EXPORTS", "false")
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
storage = StorageService(
|
||||
dashboard_dir=Path(tmpdir) / "team_dashboard",
|
||||
initial_cash=100000.0,
|
||||
)
|
||||
|
||||
assert storage.enable_compat_exports is False
|
||||
|
||||
|
||||
class TestTradeExecutor:
|
||||
def test_execute_trade_long(self):
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from pathlib import Path
|
||||
|
||||
from typer.testing import CliRunner
|
||||
|
||||
from backend import cli
|
||||
|
||||
|
||||
@@ -126,6 +128,86 @@ def test_backtest_runs_full_market_store_prepare_before_start(monkeypatch, tmp_p
|
||||
]
|
||||
|
||||
|
||||
def test_live_cli_defaults_to_generic_run_label(monkeypatch, tmp_path):
|
||||
project_root = tmp_path
|
||||
(project_root / ".env").write_text("FINNHUB_API_KEY=test\n", encoding="utf-8")
|
||||
|
||||
calls = []
|
||||
runner = CliRunner()
|
||||
|
||||
monkeypatch.setattr(cli, "get_project_root", lambda: project_root)
|
||||
monkeypatch.setattr(cli, "handle_history_cleanup", lambda config_name, auto_clean=False: None)
|
||||
monkeypatch.setattr(cli, "run_data_updater", lambda project_root: None)
|
||||
monkeypatch.setattr(cli, "auto_update_market_store", lambda config_name, end_date=None: None)
|
||||
monkeypatch.setattr(
|
||||
cli,
|
||||
"auto_enrich_market_store",
|
||||
lambda config_name, end_date=None, lookback_days=120, force=False: None,
|
||||
)
|
||||
monkeypatch.setattr(cli.os, "chdir", lambda path: None)
|
||||
|
||||
def fake_run(cmd, check=True, **kwargs):
|
||||
calls.append(cmd)
|
||||
return 0
|
||||
|
||||
monkeypatch.setattr(cli.subprocess, "run", fake_run)
|
||||
|
||||
result = runner.invoke(cli.app, ["live", "--trigger-time", "now"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert calls
|
||||
assert "--config-name" in calls[0]
|
||||
config_index = calls[0].index("--config-name")
|
||||
assert calls[0][config_index + 1] == "default_live_run"
|
||||
|
||||
|
||||
def test_backtest_cli_defaults_to_generic_run_label(monkeypatch, tmp_path):
|
||||
project_root = tmp_path
|
||||
calls = []
|
||||
runner = CliRunner()
|
||||
|
||||
monkeypatch.setattr(cli, "get_project_root", lambda: project_root)
|
||||
monkeypatch.setattr(cli, "handle_history_cleanup", lambda config_name, auto_clean=False: None)
|
||||
monkeypatch.setattr(cli, "run_data_updater", lambda project_root: None)
|
||||
monkeypatch.setattr(
|
||||
cli,
|
||||
"auto_prepare_backtest_market_store",
|
||||
lambda config_name, start_date, end_date: None,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
cli,
|
||||
"auto_enrich_market_store",
|
||||
lambda config_name, end_date=None, lookback_days=120, force=False: None,
|
||||
)
|
||||
monkeypatch.setattr(cli.os, "chdir", lambda path: None)
|
||||
|
||||
def fake_run(cmd, check=True, **kwargs):
|
||||
calls.append(cmd)
|
||||
return 0
|
||||
|
||||
monkeypatch.setattr(cli.subprocess, "run", fake_run)
|
||||
|
||||
result = runner.invoke(
|
||||
cli.app,
|
||||
["backtest", "--start", "2026-03-01", "--end", "2026-03-10"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert calls
|
||||
assert "--config-name" in calls[0]
|
||||
config_index = calls[0].index("--config-name")
|
||||
assert calls[0][config_index + 1] == "default_backtest_run"
|
||||
|
||||
|
||||
def test_main_parser_defaults_to_generic_run_label():
|
||||
from backend.main import build_arg_parser
|
||||
|
||||
parser = build_arg_parser()
|
||||
args = parser.parse_args([])
|
||||
|
||||
assert args.config_name == "default_run"
|
||||
|
||||
|
||||
def test_ingest_enrich_runs_batch_enrichment(monkeypatch):
|
||||
calls = []
|
||||
|
||||
|
||||
405
backend/tests/test_evo_agent_integration.py
Normal file
405
backend/tests/test_evo_agent_integration.py
Normal file
@@ -0,0 +1,405 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Integration tests for EvoAgent system.
|
||||
|
||||
These tests verify the integration between:
|
||||
- UnifiedAgentFactory
|
||||
- EvoAgent
|
||||
- ToolGuardMixin
|
||||
- Workspace-driven configuration
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, AsyncMock
|
||||
|
||||
|
||||
class TestUnifiedAgentFactoryIntegration:
|
||||
"""Test UnifiedAgentFactory creates agents correctly."""
|
||||
|
||||
def test_factory_creates_analyst_with_workspace_config(self, tmp_path, monkeypatch):
|
||||
"""Test that factory creates EvoAgent with workspace config."""
|
||||
from backend.agents.unified_factory import UnifiedAgentFactory
|
||||
|
||||
# Setup mock skills manager
|
||||
class MockSkillsManager:
|
||||
def get_agent_asset_dir(self, config_name, agent_id):
|
||||
path = tmp_path / "runs" / config_name / "agents" / agent_id
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
return path
|
||||
|
||||
# Create workspace config
|
||||
workspace_dir = tmp_path / "runs" / "test_config" / "agents" / "fundamentals_analyst"
|
||||
workspace_dir.mkdir(parents=True, exist_ok=True)
|
||||
(workspace_dir / "agent.yaml").write_text(
|
||||
"prompt_files:\n - SOUL.md\n - CUSTOM.md\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
(workspace_dir / "SOUL.md").write_text("System prompt content", encoding="utf-8")
|
||||
(workspace_dir / "CUSTOM.md").write_text("Custom instructions", encoding="utf-8")
|
||||
|
||||
factory = UnifiedAgentFactory(
|
||||
config_name="test_config",
|
||||
skills_manager=MockSkillsManager(),
|
||||
)
|
||||
|
||||
# Mock EvoAgent creation by patching where it's imported
|
||||
created_kwargs = {}
|
||||
|
||||
class MockEvoAgent:
|
||||
def __init__(self, **kwargs):
|
||||
created_kwargs.update(kwargs)
|
||||
self.toolkit = None
|
||||
|
||||
# Patch at the location where EvoAgent is imported in unified_factory
|
||||
import backend.agents.base.evo_agent as evo_agent_module
|
||||
original_evo_agent = evo_agent_module.EvoAgent
|
||||
evo_agent_module.EvoAgent = MockEvoAgent
|
||||
|
||||
try:
|
||||
monkeypatch.setattr(
|
||||
factory,
|
||||
"_create_toolkit",
|
||||
lambda *args, **kwargs: MagicMock(),
|
||||
)
|
||||
|
||||
agent = factory.create_analyst(
|
||||
analyst_type="fundamentals_analyst",
|
||||
model=MagicMock(),
|
||||
formatter=MagicMock(),
|
||||
)
|
||||
|
||||
assert isinstance(agent, MockEvoAgent)
|
||||
assert created_kwargs["agent_id"] == "fundamentals_analyst"
|
||||
assert created_kwargs["config_name"] == "test_config"
|
||||
assert "SOUL.md" in created_kwargs["prompt_files"]
|
||||
finally:
|
||||
evo_agent_module.EvoAgent = original_evo_agent
|
||||
|
||||
def test_factory_creates_risk_manager(self, tmp_path, monkeypatch):
|
||||
"""Test that factory creates risk manager EvoAgent."""
|
||||
from backend.agents.unified_factory import UnifiedAgentFactory
|
||||
|
||||
class MockSkillsManager:
|
||||
def get_agent_asset_dir(self, config_name, agent_id):
|
||||
path = tmp_path / "runs" / config_name / "agents" / agent_id
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
return path
|
||||
|
||||
factory = UnifiedAgentFactory(
|
||||
config_name="test_config",
|
||||
skills_manager=MockSkillsManager(),
|
||||
)
|
||||
|
||||
created_kwargs = {}
|
||||
|
||||
class MockEvoAgent:
|
||||
def __init__(self, **kwargs):
|
||||
created_kwargs.update(kwargs)
|
||||
self.toolkit = None
|
||||
|
||||
import backend.agents.base.evo_agent as evo_agent_module
|
||||
original_evo_agent = evo_agent_module.EvoAgent
|
||||
evo_agent_module.EvoAgent = MockEvoAgent
|
||||
|
||||
try:
|
||||
monkeypatch.setattr(
|
||||
factory,
|
||||
"_create_toolkit",
|
||||
lambda *args, **kwargs: MagicMock(),
|
||||
)
|
||||
|
||||
agent = factory.create_risk_manager(
|
||||
model=MagicMock(),
|
||||
formatter=MagicMock(),
|
||||
)
|
||||
|
||||
assert isinstance(agent, MockEvoAgent)
|
||||
assert created_kwargs["agent_id"] == "risk_manager"
|
||||
finally:
|
||||
evo_agent_module.EvoAgent = original_evo_agent
|
||||
|
||||
def test_factory_creates_portfolio_manager(self, tmp_path, monkeypatch):
|
||||
"""Test that factory creates portfolio manager EvoAgent with financial params."""
|
||||
from backend.agents.unified_factory import UnifiedAgentFactory
|
||||
|
||||
class MockSkillsManager:
|
||||
def get_agent_asset_dir(self, config_name, agent_id):
|
||||
path = tmp_path / "runs" / config_name / "agents" / agent_id
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
return path
|
||||
|
||||
factory = UnifiedAgentFactory(
|
||||
config_name="test_config",
|
||||
skills_manager=MockSkillsManager(),
|
||||
)
|
||||
|
||||
created_kwargs = {}
|
||||
|
||||
def mock_make_decision(*args, **kwargs):
|
||||
pass
|
||||
|
||||
class MockEvoAgent:
|
||||
def __init__(self, **kwargs):
|
||||
created_kwargs.update(kwargs)
|
||||
self.toolkit = None
|
||||
# Add _make_decision for PM toolkit registration
|
||||
self._make_decision = mock_make_decision
|
||||
|
||||
import backend.agents.base.evo_agent as evo_agent_module
|
||||
original_evo_agent = evo_agent_module.EvoAgent
|
||||
evo_agent_module.EvoAgent = MockEvoAgent
|
||||
|
||||
try:
|
||||
agent = factory.create_portfolio_manager(
|
||||
model=MagicMock(),
|
||||
formatter=MagicMock(),
|
||||
initial_cash=50000.0,
|
||||
margin_requirement=0.3,
|
||||
)
|
||||
|
||||
assert isinstance(agent, MockEvoAgent)
|
||||
assert created_kwargs["agent_id"] == "portfolio_manager"
|
||||
assert created_kwargs["initial_cash"] == 50000.0
|
||||
assert created_kwargs["margin_requirement"] == 0.3
|
||||
finally:
|
||||
evo_agent_module.EvoAgent = original_evo_agent
|
||||
|
||||
def test_factory_respects_evo_agent_ids_env(self, monkeypatch, tmp_path):
|
||||
"""Test that factory respects EVO_AGENT_IDS environment variable."""
|
||||
from backend.agents.unified_factory import UnifiedAgentFactory
|
||||
|
||||
# Only enable technical_analyst as EvoAgent
|
||||
monkeypatch.setenv("EVO_AGENT_IDS", "technical_analyst")
|
||||
|
||||
class MockSkillsManager:
|
||||
def get_agent_asset_dir(self, config_name, agent_id):
|
||||
path = tmp_path / "runs" / config_name / "agents" / agent_id
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
return path
|
||||
|
||||
factory = UnifiedAgentFactory(
|
||||
config_name="test_config",
|
||||
skills_manager=MockSkillsManager(),
|
||||
)
|
||||
|
||||
# technical_analyst should use EvoAgent
|
||||
assert factory._should_use_evo_agent("technical_analyst") is True
|
||||
# fundamentals_analyst should use legacy
|
||||
assert factory._should_use_evo_agent("fundamentals_analyst") is False
|
||||
|
||||
def test_factory_legacy_mode_disables_evo_agent(self, monkeypatch):
|
||||
"""Test that EVO_AGENT_IDS=legacy disables all EvoAgents."""
|
||||
from backend.agents.unified_factory import UnifiedAgentFactory
|
||||
|
||||
monkeypatch.setenv("EVO_AGENT_IDS", "legacy")
|
||||
|
||||
factory = UnifiedAgentFactory(
|
||||
config_name="test_config",
|
||||
skills_manager=MagicMock(),
|
||||
)
|
||||
|
||||
assert factory._evo_agent_ids == set()
|
||||
assert factory._should_use_evo_agent("any_agent") is False
|
||||
|
||||
|
||||
class TestToolGuardIntegration:
|
||||
"""Test ToolGuardMixin integration with EvoAgent."""
|
||||
|
||||
def test_tool_guard_intercepts_guarded_tools(self):
|
||||
"""Test that ToolGuard intercepts tools requiring approval."""
|
||||
from backend.agents.base.tool_guard import ToolGuardMixin
|
||||
|
||||
class TestAgent(ToolGuardMixin):
|
||||
def __init__(self):
|
||||
self._init_tool_guard()
|
||||
self.agent_id = "test_agent"
|
||||
self.workspace_id = "test_workspace"
|
||||
self.session_id = "test_session"
|
||||
|
||||
agent = TestAgent()
|
||||
|
||||
# Verify place_order is in guarded tools
|
||||
assert agent._is_tool_guarded("place_order") is True
|
||||
assert agent._is_tool_denied("execute_shell_command") is True
|
||||
|
||||
def test_tool_guard_approval_flow(self):
|
||||
"""Test the full approval flow for a guarded tool."""
|
||||
from backend.agents.base.tool_guard import (
|
||||
ToolGuardStore,
|
||||
ApprovalStatus,
|
||||
)
|
||||
|
||||
store = ToolGuardStore()
|
||||
|
||||
# Create a pending approval record
|
||||
record = store.create_pending(
|
||||
tool_name="place_order",
|
||||
tool_input={"ticker": "AAPL", "quantity": 100},
|
||||
agent_id="test_agent",
|
||||
workspace_id="test_workspace",
|
||||
)
|
||||
|
||||
assert record.status == ApprovalStatus.PENDING
|
||||
assert record.tool_name == "place_order"
|
||||
|
||||
# Approve the request with resolved_by
|
||||
updated = store.set_status(record.approval_id, ApprovalStatus.APPROVED, resolved_by="test_user")
|
||||
assert updated.status == ApprovalStatus.APPROVED
|
||||
assert updated.resolved_by == "test_user"
|
||||
|
||||
def test_tool_guard_default_lists(self):
|
||||
"""Test default guarded and denied tool lists."""
|
||||
from backend.agents.base.tool_guard import (
|
||||
DEFAULT_GUARDED_TOOLS,
|
||||
DEFAULT_DENIED_TOOLS,
|
||||
)
|
||||
|
||||
# Critical tools should be guarded
|
||||
assert "place_order" in DEFAULT_GUARDED_TOOLS
|
||||
assert "modify_position" in DEFAULT_GUARDED_TOOLS
|
||||
assert "write_file" in DEFAULT_GUARDED_TOOLS
|
||||
assert "edit_file" in DEFAULT_GUARDED_TOOLS
|
||||
|
||||
# Dangerous tools should be denied
|
||||
assert "execute_shell_command" in DEFAULT_DENIED_TOOLS
|
||||
|
||||
|
||||
class TestEvoAgentWorkspaceIntegration:
|
||||
"""Test EvoAgent workspace-driven configuration."""
|
||||
|
||||
def test_evo_agent_loads_prompt_files_from_workspace(self, tmp_path, monkeypatch):
|
||||
"""Test that EvoAgent loads prompt files from workspace directory."""
|
||||
from backend.agents.base.evo_agent import EvoAgent
|
||||
|
||||
workspace_dir = tmp_path / "runs" / "demo" / "agents" / "test_analyst"
|
||||
workspace_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Create prompt files
|
||||
(workspace_dir / "SOUL.md").write_text(
|
||||
"You are a test analyst.", encoding="utf-8"
|
||||
)
|
||||
(workspace_dir / "INSTRUCTIONS.md").write_text(
|
||||
"Additional instructions.", encoding="utf-8"
|
||||
)
|
||||
|
||||
class MockToolkit:
|
||||
def __init__(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def register_agent_skill(self, path):
|
||||
pass
|
||||
|
||||
monkeypatch.setattr(
|
||||
"backend.agents.base.evo_agent.Toolkit",
|
||||
MockToolkit,
|
||||
)
|
||||
|
||||
class MockSkillsManager:
|
||||
def get_agent_active_root(self, config_name, agent_id):
|
||||
return workspace_dir / "skills" / "active"
|
||||
|
||||
def list_active_skill_metadata(self, config_name, agent_id):
|
||||
return []
|
||||
|
||||
agent = EvoAgent(
|
||||
agent_id="test_analyst",
|
||||
config_name="demo",
|
||||
workspace_dir=workspace_dir,
|
||||
model=MagicMock(),
|
||||
formatter=MagicMock(),
|
||||
skills_manager=MockSkillsManager(),
|
||||
prompt_files=["SOUL.md", "INSTRUCTIONS.md"],
|
||||
)
|
||||
|
||||
# Verify prompts are loaded into system prompt
|
||||
assert "You are a test analyst." in agent._sys_prompt
|
||||
assert "Additional instructions." in agent._sys_prompt
|
||||
|
||||
|
||||
class TestFactoryCaching:
|
||||
"""Test UnifiedAgentFactory caching behavior."""
|
||||
|
||||
def test_factory_cache_per_config(self, monkeypatch):
|
||||
"""Test that factory is cached per config name."""
|
||||
from backend.agents.unified_factory import (
|
||||
get_agent_factory,
|
||||
clear_factory_cache,
|
||||
)
|
||||
|
||||
# Clear any existing cache
|
||||
clear_factory_cache()
|
||||
|
||||
mock_skills_manager = MagicMock()
|
||||
|
||||
factory1 = get_agent_factory("config_a", mock_skills_manager)
|
||||
factory2 = get_agent_factory("config_a", mock_skills_manager)
|
||||
factory3 = get_agent_factory("config_b", mock_skills_manager)
|
||||
|
||||
# Same config should return same instance
|
||||
assert factory1 is factory2
|
||||
# Different config should return different instance
|
||||
assert factory1 is not factory3
|
||||
|
||||
def test_clear_factory_cache(self):
|
||||
"""Test that clear_factory_cache removes all cached factories."""
|
||||
from backend.agents.unified_factory import (
|
||||
get_agent_factory,
|
||||
clear_factory_cache,
|
||||
)
|
||||
|
||||
mock_skills_manager = MagicMock()
|
||||
|
||||
factory1 = get_agent_factory("config_c", mock_skills_manager)
|
||||
clear_factory_cache()
|
||||
factory2 = get_agent_factory("config_c", mock_skills_manager)
|
||||
|
||||
# After clearing cache, should be new instance
|
||||
assert factory1 is not factory2
|
||||
|
||||
|
||||
class TestDeprecationWarnings:
|
||||
"""Test that legacy agents emit deprecation warnings."""
|
||||
|
||||
def test_risk_agent_emits_deprecation_warning(self):
|
||||
"""Test that RiskAgent emits deprecation warning on import."""
|
||||
import warnings
|
||||
import sys
|
||||
|
||||
# Clear cache to force reimport
|
||||
modules_to_remove = [
|
||||
k for k in sys.modules.keys()
|
||||
if k.endswith("risk_manager") and "backend.agents" in k
|
||||
]
|
||||
for m in modules_to_remove:
|
||||
del sys.modules[m]
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter("always")
|
||||
from backend.agents.risk_manager import RiskAgent
|
||||
|
||||
deprecation_warnings = [
|
||||
x for x in w if issubclass(x.category, DeprecationWarning)
|
||||
]
|
||||
assert any("RiskAgent is deprecated" in str(x.message) for x in deprecation_warnings)
|
||||
|
||||
def test_pm_agent_emits_deprecation_warning(self):
|
||||
"""Test that PMAgent emits deprecation warning on import."""
|
||||
import warnings
|
||||
import sys
|
||||
|
||||
# Clear cache to force reimport
|
||||
modules_to_remove = [
|
||||
k for k in sys.modules.keys()
|
||||
if k.endswith("portfolio_manager") and "backend.agents" in k
|
||||
]
|
||||
for m in modules_to_remove:
|
||||
del sys.modules[m]
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter("always")
|
||||
from backend.agents.portfolio_manager import PMAgent
|
||||
|
||||
deprecation_warnings = [
|
||||
x for x in w if issubclass(x.category, DeprecationWarning)
|
||||
]
|
||||
assert any("PMAgent is deprecated" in str(x.message) for x in deprecation_warnings)
|
||||
429
backend/tests/test_evo_agent_selection.py
Normal file
429
backend/tests/test_evo_agent_selection.py
Normal file
@@ -0,0 +1,429 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Tests for selective EvoAgent construction."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from backend.config.constants import ANALYST_TYPES
|
||||
|
||||
|
||||
def test_main_resolve_evo_agent_ids_filters_unsupported_roles(monkeypatch):
|
||||
from backend import main as main_module
|
||||
|
||||
monkeypatch.setenv(
|
||||
"EVO_AGENT_IDS",
|
||||
"fundamentals_analyst,portfolio_manager,unknown,technical_analyst",
|
||||
)
|
||||
|
||||
resolved = main_module._resolve_evo_agent_ids()
|
||||
|
||||
assert resolved == {"fundamentals_analyst", "portfolio_manager", "technical_analyst"}
|
||||
|
||||
|
||||
def test_pipeline_runner_resolve_evo_agent_ids_keeps_supported_roles(monkeypatch):
|
||||
from backend.core import pipeline_runner as runner_module
|
||||
|
||||
monkeypatch.setenv("EVO_AGENT_IDS", "risk_manager,valuation_analyst")
|
||||
|
||||
resolved = runner_module._resolve_evo_agent_ids()
|
||||
|
||||
assert resolved == {"risk_manager", "valuation_analyst"}
|
||||
|
||||
|
||||
def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):
|
||||
from backend import main as main_module
|
||||
|
||||
created = {}
|
||||
|
||||
class DummySkillsManager:
|
||||
def get_agent_asset_dir(self, config_name, agent_id):
|
||||
path = tmp_path / "runs" / config_name / "agents" / agent_id
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
(path / "agent.yaml").write_text(
|
||||
"prompt_files:\n - SOUL.md\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
return path
|
||||
|
||||
class DummyEvoAgent:
|
||||
def __init__(self, **kwargs):
|
||||
created.update(kwargs)
|
||||
self.toolkit = None
|
||||
|
||||
monkeypatch.setenv("EVO_AGENT_IDS", "fundamentals_analyst")
|
||||
monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
|
||||
monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "toolkit")
|
||||
|
||||
agent = main_module._create_analyst_agent(
|
||||
analyst_type="fundamentals_analyst",
|
||||
config_name="demo",
|
||||
model="model",
|
||||
formatter="formatter",
|
||||
skills_manager=DummySkillsManager(),
|
||||
active_skill_map={"fundamentals_analyst": [Path("/tmp/skill")]},
|
||||
long_term_memory=None,
|
||||
)
|
||||
|
||||
assert isinstance(agent, DummyEvoAgent)
|
||||
assert created["agent_id"] == "fundamentals_analyst"
|
||||
assert created["config_name"] == "demo"
|
||||
assert created["prompt_files"] == ["SOUL.md"]
|
||||
assert agent.toolkit == "toolkit"
|
||||
assert agent.workspace_id == "demo"
|
||||
|
||||
|
||||
def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):
|
||||
from backend import main as main_module
|
||||
|
||||
created = {}
|
||||
|
||||
class DummySkillsManager:
|
||||
def get_agent_asset_dir(self, config_name, agent_id):
|
||||
path = tmp_path / "runs" / config_name / "agents" / agent_id
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
(path / "agent.yaml").write_text(
|
||||
"prompt_files:\n - SOUL.md\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
return path
|
||||
|
||||
class DummyEvoAgent:
|
||||
def __init__(self, **kwargs):
|
||||
created.update(kwargs)
|
||||
self.toolkit = None
|
||||
|
||||
monkeypatch.setenv("EVO_AGENT_IDS", "risk_manager")
|
||||
monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
|
||||
monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "risk-toolkit")
|
||||
|
||||
agent = main_module._create_risk_manager_agent(
|
||||
config_name="demo",
|
||||
model="model",
|
||||
formatter="formatter",
|
||||
skills_manager=DummySkillsManager(),
|
||||
active_skill_map={"risk_manager": [Path("/tmp/skill")]},
|
||||
long_term_memory=None,
|
||||
)
|
||||
|
||||
assert isinstance(agent, DummyEvoAgent)
|
||||
assert created["agent_id"] == "risk_manager"
|
||||
assert created["config_name"] == "demo"
|
||||
assert created["prompt_files"] == ["SOUL.md"]
|
||||
assert agent.toolkit == "risk-toolkit"
|
||||
assert agent.workspace_id == "demo"
|
||||
|
||||
|
||||
def test_main_create_portfolio_manager_can_build_evo_agent(monkeypatch, tmp_path):
    """_create_portfolio_manager_agent builds an EvoAgent and forwards PM-specific kwargs."""
    from backend import main as main_module

    # Captures the kwargs the (dummy) EvoAgent constructor receives.
    created = {}

    class DummySkillsManager:
        def get_agent_asset_dir(self, config_name, agent_id):
            # Minimal runtime asset dir so prompt-file resolution succeeds.
            path = tmp_path / "runs" / config_name / "agents" / agent_id
            path.mkdir(parents=True, exist_ok=True)
            (path / "agent.yaml").write_text(
                "prompt_files:\n - SOUL.md\n",
                encoding="utf-8",
            )
            return path

    class DummyEvoAgent:
        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "portfolio_manager")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(
        main_module,
        "create_agent_toolkit",
        lambda *args, **kwargs: "pm-toolkit",
    )

    agent = main_module._create_portfolio_manager_agent(
        config_name="demo",
        model="model",
        formatter="formatter",
        initial_cash=12345.0,
        margin_requirement=0.4,
        skills_manager=DummySkillsManager(),
        active_skill_map={"portfolio_manager": [Path("/tmp/skill")]},
        long_term_memory=None,
    )

    assert isinstance(agent, DummyEvoAgent)
    assert created["agent_id"] == "portfolio_manager"
    assert created["config_name"] == "demo"
    assert created["prompt_files"] == ["SOUL.md"]
    # PM-only portfolio parameters must reach the EvoAgent constructor.
    assert created["initial_cash"] == 12345.0
    assert created["margin_requirement"] == 0.4
    assert agent.toolkit == "pm-toolkit"
    assert agent.workspace_id == "demo"
|
||||
|
||||
|
||||
def test_evo_agent_reload_runtime_assets_refreshes_prompt_files(monkeypatch, tmp_path):
    """reload_runtime_assets re-reads agent.yaml so newly declared prompt files join the system prompt."""
    from backend.agents.base.evo_agent import EvoAgent

    workspace_dir = tmp_path / "runs" / "demo" / "agents" / "fundamentals_analyst"
    workspace_dir.mkdir(parents=True, exist_ok=True)
    (workspace_dir / "SOUL.md").write_text("soul-v1", encoding="utf-8")
    (workspace_dir / "MEMORY.md").write_text("memory-v1", encoding="utf-8")
    # Initially only SOUL.md is declared as a prompt file.
    (workspace_dir / "agent.yaml").write_text(
        "prompt_files:\n"
        " - SOUL.md\n",
        encoding="utf-8",
    )

    class DummyToolkit:
        def __init__(self, *args, **kwargs):
            self.registered = []

        def register_agent_skill(self, path):
            self.registered.append(path)

    # Avoid constructing the real Toolkit inside EvoAgent.__init__.
    monkeypatch.setattr(
        "backend.agents.base.evo_agent.Toolkit",
        DummyToolkit,
    )

    class DummyModel:
        pass

    class DummyFormatter:
        pass

    agent = EvoAgent(
        agent_id="fundamentals_analyst",
        config_name="demo",
        workspace_dir=workspace_dir,
        model=DummyModel(),
        formatter=DummyFormatter(),
        # Anonymous stub satisfying the two skills-manager lookups EvoAgent makes.
        skills_manager=type(
            "SkillsManagerStub",
            (),
            {
                "get_agent_active_root": staticmethod(lambda config_name, agent_id: workspace_dir / "skills" / "active"),
                "list_active_skill_metadata": staticmethod(lambda config_name, agent_id: []),
            },
        )(),
    )

    # Only the declared SOUL.md content is in the initial system prompt.
    assert "soul-v1" in agent._sys_prompt
    assert "memory-v1" not in agent._sys_prompt

    # Declare MEMORY.md as an additional prompt file on disk.
    (workspace_dir / "agent.yaml").write_text(
        "prompt_files:\n"
        " - SOUL.md\n"
        " - MEMORY.md\n",
        encoding="utf-8",
    )

    agent.reload_runtime_assets(active_skill_dirs=[])

    # The refreshed prompt now includes MEMORY.md content.
    assert "memory-v1" in agent._sys_prompt
    assert agent.workspace_id == "demo"
    assert agent.config == {"config_name": "demo"}
|
||||
|
||||
|
||||
|
||||
|
||||
def test_pipeline_resolve_evo_agent_ids_filters_unsupported_roles(monkeypatch):
    """Unknown role names in EVO_AGENT_IDS are dropped during resolution."""
    from backend.core import pipeline as pipeline_module

    requested = ["fundamentals_analyst", "portfolio_manager", "unknown", "technical_analyst"]
    monkeypatch.setenv("EVO_AGENT_IDS", ",".join(requested))

    resolved = pipeline_module._resolve_evo_agent_ids()

    # Only the supported roles survive; "unknown" is silently filtered out.
    assert resolved == set(requested) - {"unknown"}
|
||||
|
||||
|
||||
def test_pipeline_create_runtime_analyst_uses_evo_agent_when_enabled(monkeypatch, tmp_path):
    """Test that _create_runtime_analyst creates EvoAgent when in EVO_AGENT_IDS."""
    from backend.core import pipeline as pipeline_module

    # Captures constructor kwargs from whichever dummy agent class gets used.
    created = {}

    class DummyEvoAgent:
        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    class DummyAnalystAgent:
        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "fundamentals_analyst")
    monkeypatch.setattr(pipeline_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(pipeline_module, "AnalystAgent", DummyAnalystAgent)
    monkeypatch.setattr(
        pipeline_module,
        "create_agent_toolkit",
        lambda *args, **kwargs: "toolkit",
    )
    monkeypatch.setattr(
        pipeline_module,
        "get_agent_model",
        lambda x: "model",
    )
    monkeypatch.setattr(
        pipeline_module,
        "get_agent_formatter",
        lambda x: "formatter",
    )

    # Create a mock pipeline instance
    class MockPM:
        def __init__(self):
            # Pipeline reads config_name from the PM's config.
            self.config = {"config_name": "demo"}

    pipeline = pipeline_module.TradingPipeline(
        analysts=[],
        risk_manager=None,
        portfolio_manager=MockPM(),
    )

    # Mock workspace_manager methods
    monkeypatch.setattr(
        pipeline_module.WorkspaceManager,
        "ensure_agent_assets",
        lambda *args, **kwargs: None,
    )

    result = pipeline._create_runtime_analyst("test_analyst", "fundamentals_analyst")

    assert "Created runtime analyst" in result
    # EvoAgent path: captured kwargs carry agent_id/config_name.
    assert created.get("agent_id") == "test_analyst"
    assert created.get("config_name") == "demo"
|
||||
|
||||
|
||||
def test_pipeline_create_runtime_analyst_uses_legacy_when_not_in_evo_ids(monkeypatch, tmp_path):
    """Test that _create_runtime_analyst creates legacy AnalystAgent when not in EVO_AGENT_IDS."""
    from backend.core import pipeline as pipeline_module

    # Captures constructor kwargs from whichever dummy agent class gets used.
    created = {}

    class DummyEvoAgent:
        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    class DummyAnalystAgent:
        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    # EVO_AGENT_IDS does not include fundamentals_analyst
    monkeypatch.setenv("EVO_AGENT_IDS", "technical_analyst")
    monkeypatch.setattr(pipeline_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(pipeline_module, "AnalystAgent", DummyAnalystAgent)
    monkeypatch.setattr(
        pipeline_module,
        "create_agent_toolkit",
        lambda *args, **kwargs: "toolkit",
    )
    monkeypatch.setattr(
        pipeline_module,
        "get_agent_model",
        lambda x: "model",
    )
    monkeypatch.setattr(
        pipeline_module,
        "get_agent_formatter",
        lambda x: "formatter",
    )

    # Create a mock pipeline instance
    class MockPM:
        def __init__(self):
            self.config = {"config_name": "demo"}

    pipeline = pipeline_module.TradingPipeline(
        analysts=[],
        risk_manager=None,
        portfolio_manager=MockPM(),
    )

    # Mock workspace_manager methods
    monkeypatch.setattr(
        pipeline_module.WorkspaceManager,
        "ensure_agent_assets",
        lambda *args, **kwargs: None,
    )

    result = pipeline._create_runtime_analyst("test_analyst", "fundamentals_analyst")

    assert "Created runtime analyst" in result
    # Should use legacy AnalystAgent
    # (its constructor takes analyst_type, which EvoAgent's does not).
    assert created.get("analyst_type") == "fundamentals_analyst"
|
||||
|
||||
|
||||
def test_main_resolve_evo_agent_ids_returns_all_by_default(monkeypatch):
    """Without EVO_AGENT_IDS set, every supported role resolves to EvoAgent."""
    from backend import main as main_module
    from backend.config.constants import ANALYST_TYPES

    # Remove the env var so the default resolution path runs.
    monkeypatch.delenv("EVO_AGENT_IDS", raising=False)

    all_roles = {"risk_manager", "portfolio_manager"}
    all_roles.update(ANALYST_TYPES)

    assert main_module._resolve_evo_agent_ids() == all_roles
|
||||
|
||||
|
||||
def test_evo_agent_supports_long_term_memory(monkeypatch, tmp_path):
    """Test that EvoAgent can be created with long_term_memory."""
    from backend import main as main_module

    # Captures the kwargs the (dummy) EvoAgent constructor receives.
    created = {}

    class DummySkillsManager:
        def get_agent_asset_dir(self, config_name, agent_id):
            # Minimal runtime asset dir so prompt-file resolution succeeds.
            path = tmp_path / "runs" / config_name / "agents" / agent_id
            path.mkdir(parents=True, exist_ok=True)
            (path / "agent.yaml").write_text(
                "prompt_files:\n - SOUL.md\n",
                encoding="utf-8",
            )
            return path

    class DummyEvoAgent:
        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    # Default: all roles use EvoAgent
    monkeypatch.delenv("EVO_AGENT_IDS", raising=False)
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "toolkit")

    # Create with long_term_memory - should still use EvoAgent
    dummy_memory = {"type": "reme"}
    agent = main_module._create_analyst_agent(
        analyst_type="fundamentals_analyst",
        config_name="demo",
        model="model",
        formatter="formatter",
        skills_manager=DummySkillsManager(),
        active_skill_map={"fundamentals_analyst": []},
        long_term_memory=dummy_memory,
    )

    assert isinstance(agent, DummyEvoAgent)
    assert created["agent_id"] == "fundamentals_analyst"
    # The memory object must be forwarded untouched (same identity, not a copy).
    assert created["long_term_memory"] is dummy_memory
|
||||
|
||||
|
||||
def test_evo_agent_legacy_mode(monkeypatch):
    """Setting EVO_AGENT_IDS=legacy opts every role out of EvoAgent."""
    from backend import main as main_module

    monkeypatch.setenv("EVO_AGENT_IDS", "legacy")

    assert main_module._resolve_evo_agent_ids() == set()
|
||||
@@ -5,6 +5,7 @@ from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.core.state_sync import StateSync
|
||||
from backend.services import gateway_cycle_support, gateway_runtime_support
|
||||
|
||||
|
||||
@@ -43,6 +44,12 @@ class _DummyStorage:
|
||||
self.initial_cash = 100000.0
|
||||
self.is_live_session_active = False
|
||||
self.server_state_updates = []
|
||||
self.max_feed_history = 200
|
||||
self.runtime_db = SimpleNamespace(
|
||||
get_recent_feed_events=lambda limit=200: [],
|
||||
get_last_day_feed_events=lambda current_date=None, limit=200: [],
|
||||
)
|
||||
self._persisted_server_state = {}
|
||||
|
||||
def can_apply_initial_cash(self):
|
||||
return True
|
||||
@@ -54,6 +61,9 @@ class _DummyStorage:
|
||||
def update_server_state_from_dashboard(self, state):
|
||||
self.server_state_updates.append(state)
|
||||
|
||||
def read_persisted_server_state(self):
|
||||
return dict(self._persisted_server_state)
|
||||
|
||||
def load_file(self, name):
|
||||
if name == "summary":
|
||||
return {"totalAssetValue": self.initial_cash}
|
||||
@@ -199,3 +209,70 @@ async def test_refresh_market_store_for_watchlist_emits_system_messages(monkeypa
|
||||
|
||||
assert gateway.state_sync.system_messages[0] == "正在同步自选股市场数据: AAPL, MSFT"
|
||||
assert "自选股市场数据已同步:" in gateway.state_sync.system_messages[1]
|
||||
|
||||
|
||||
def test_initial_state_payload_prefers_dashboard_snapshot_for_top_level_views():
    """Empty runtime views stay empty while the dashboard summary comes from storage."""
    storage = _DummyStorage()
    sync = StateSync(storage=storage)
    sync._state = {
        "holdings": [],
        "trades": [],
        "stats": {},
        "leaderboard": [],
        "portfolio": {"total_value": 100000.0},
    }

    payload = sync.get_initial_state_payload(include_dashboard=True)

    # Top-level views reflect the (empty) runtime state verbatim.
    for view in ("holdings", "trades", "leaderboard"):
        assert payload[view] == []
    assert payload["stats"] == {}
    # The dashboard summary is built from storage's initial cash.
    assert payload["dashboard"]["summary"]["totalAssetValue"] == 100000.0
|
||||
|
||||
|
||||
def test_initial_state_payload_uses_dashboard_snapshot_for_sparse_runtime_state():
    """Sparse runtime state is backfilled from the dashboard snapshot builder."""
    snapshot = {
        "summary": {"totalAssetValue": 123456.0},
        "holdings": [{"ticker": "AAPL"}],
        "stats": {"totalTrades": 3},
        "trades": [{"ticker": "AAPL"}],
        "leaderboard": [{"agentId": "technical_analyst"}],
    }

    class SnapshotStorage(_DummyStorage):
        def build_dashboard_snapshot_from_state(self, state):
            # Return a fresh copy so the payload cannot mutate the fixture.
            return dict(snapshot)

    sync = StateSync(storage=SnapshotStorage())
    sync._state = {
        "holdings": [],
        "trades": [],
        "stats": {},
        "leaderboard": [],
    }

    payload = sync.get_initial_state_payload(include_dashboard=True)

    # Every empty view is replaced by the snapshot's data.
    assert payload["holdings"][0]["ticker"] == "AAPL"
    assert payload["trades"][0]["ticker"] == "AAPL"
    assert payload["stats"]["totalTrades"] == 3
    assert payload["leaderboard"][0]["agentId"] == "technical_analyst"
|
||||
|
||||
|
||||
def test_initial_state_payload_falls_back_to_persisted_portfolio():
    """An empty live portfolio falls back to the persisted server state."""
    persisted = {
        "total_value": 123456.0,
        "pnl_percent": 12.34,
        "equity": [{"t": 1, "v": 123456.0}],
    }
    storage = _DummyStorage()
    storage._persisted_server_state = {"portfolio": persisted}

    sync = StateSync(storage=storage)
    sync._state = {"portfolio": {}}

    payload = sync.get_initial_state_payload(include_dashboard=True)

    # Portfolio figures come from the persisted snapshot, not the empty live state.
    assert payload["portfolio"]["total_value"] == persisted["total_value"]
    assert payload["portfolio"]["pnl_percent"] == persisted["pnl_percent"]
|
||||
|
||||
225
backend/tests/test_migration_boundaries.py
Normal file
225
backend/tests/test_migration_boundaries.py
Normal file
@@ -0,0 +1,225 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Guardrails around partially migrated agent-loading paths."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
from backend.agents.base.tool_guard import TOOL_GUARD_STORE, ToolApprovalRequest
|
||||
from backend.apps.agent_service import create_app
|
||||
from backend.core.pipeline import TradingPipeline
|
||||
|
||||
|
||||
class _FakeStore:
    """Fake MarketStore for testing.

    Returns small, deterministic payloads so service routes can be exercised
    without a real database.
    """

    def get_ticker_watermarks(self, symbol):
        # Far-future fetch date so callers treat news as already fresh.
        return {"symbol": symbol, "last_news_fetch": "2026-12-31"}

    def get_news_timeline_enriched(self, symbol, start_date=None, end_date=None):
        return [{"date": end_date, "count": 1}]

    def get_news_items(self, symbol, start_date=None, end_date=None, limit=100):
        return [{"id": "news-raw-1", "ticker": symbol, "title": "Raw Title", "date": end_date}]

    def get_news_items_enriched(self, symbol, start_date=None, end_date=None, trade_date=None, limit=100):
        return [{"id": "news-1", "ticker": symbol, "title": "Title", "date": trade_date or end_date}]

    def upsert_news_analysis(self, symbol, rows):
        # Pretend every row was written.
        return len(rows)

    def get_analyzed_news_ids(self, symbol, start_date=None, end_date=None):
        # Nothing analyzed yet, so enrichment always has work to do.
        return set()

    def get_news_categories_enriched(self, symbol, start_date=None, end_date=None, limit=200):
        return {"market": {"label": "market", "count": 1, "article_ids": ["news-1"]}}

    def get_news_by_ids_enriched(self, symbol, article_ids):
        # Echoes the first requested id; assumes callers pass a non-empty list.
        return [{"id": article_ids[0], "ticker": symbol, "title": "Picked"}]
|
||||
|
||||
|
||||
def test_legacy_adapter_module_has_been_removed():
    """The backend/agents/compat.py migration shim must stay deleted."""
    agents_dir = Path(__file__).resolve().parents[1] / "agents"
    assert not (agents_dir / "compat.py").exists()
|
||||
|
||||
|
||||
def test_pipeline_workspace_loading_entrypoints_have_been_removed():
    """Removed workspace-loading entrypoints must not reappear on TradingPipeline."""
    pipeline = TradingPipeline(
        analysts=[],
        risk_manager=object(),
        portfolio_manager=object(),
    )

    for removed_attr in ("load_agents_from_workspace", "reload_agents_from_workspace"):
        assert not hasattr(pipeline, removed_attr)
|
||||
|
||||
|
||||
def test_pipeline_sync_agent_runtime_context_sets_session_and_workspace():
    """_sync_agent_runtime_context stamps session and workspace ids onto agents."""
    pm = type("PM", (), {"config": {"config_name": "demo"}})()
    analyst = type("Analyst", (), {})()
    pipeline = TradingPipeline(
        analysts=[analyst],
        risk_manager=object(),
        portfolio_manager=pm,
    )

    session_key = "2026-03-30"
    pipeline._sync_agent_runtime_context([analyst], session_key=session_key)

    # session_id mirrors the session key; workspace_id comes from the PM config.
    assert analyst.session_id == session_key
    assert analyst.workspace_id == "demo"
|
||||
|
||||
|
||||
def test_guard_approve_endpoint_notifies_pending_request():
    """POST /api/guard/approve resolves the pending approval and reports run scope."""
    record = TOOL_GUARD_STORE.create_pending(
        tool_name="write_file",
        tool_input={"path": "demo.txt"},
        agent_id="fundamentals_analyst",
        workspace_id="demo",
    )
    # Attach an in-flight request so the endpoint has a waiter to notify.
    pending = ToolApprovalRequest(
        approval_id=record.approval_id,
        tool_name=record.tool_name,
        tool_input=record.tool_input,
        tool_call_id="call_1",
        session_id=None,
    )
    record.pending_request = pending

    with TestClient(create_app()) as client:
        response = client.post(
            "/api/guard/approve",
            json={"approval_id": record.approval_id, "one_time": True, "expires_in_minutes": 30},
        )

    assert response.status_code == 200
    # run_id mirrors workspace_id for semantic clarity / backward compatibility.
    assert response.json()["run_id"] == "demo"
    assert response.json()["workspace_id"] == "demo"
    assert response.json()["scope_type"] == "runtime_run"
    assert pending.approved is True
    # The waiter returns immediately now that approval has landed.
    assert asyncio.run(pending.wait_for_approval(timeout=0.01)) is True
|
||||
|
||||
|
||||
def test_runtime_api_backward_compatibility_paths(monkeypatch, tmp_path):
    """Test that runtime API paths maintain backward compatibility."""
    from backend.api import runtime as runtime_module

    # Seed a minimal persisted runtime state the read endpoints can serve.
    run_dir = tmp_path / "runs" / "demo"
    state_dir = run_dir / "state"
    state_dir.mkdir(parents=True)
    (state_dir / "runtime_state.json").write_text(
        json.dumps(
            {
                "context": {
                    "config_name": "demo",
                    "run_dir": str(run_dir),
                    "bootstrap_values": {"tickers": ["AAPL"]},
                },
                "agents": [],
                "events": [],
            }
        ),
        encoding="utf-8",
    )

    monkeypatch.setattr(runtime_module, "PROJECT_ROOT", tmp_path)
    # Pretend the gateway is up so gateway-dependent routes respond 200.
    monkeypatch.setattr(runtime_module, "_is_gateway_running", lambda: True)
    runtime_module.get_runtime_state().gateway_port = 8765

    from backend.apps.runtime_service import create_app

    with TestClient(create_app()) as client:
        # Test that old path patterns still work
        assert client.get("/api/runtime/config").status_code == 200
        assert client.get("/api/runtime/agents").status_code == 200
        assert client.get("/api/runtime/events").status_code == 200
        assert client.get("/api/runtime/history").status_code == 200
        assert client.get("/api/runtime/context").status_code == 200
|
||||
|
||||
|
||||
def test_trading_service_backward_compatibility_paths(monkeypatch):
    """Test that trading API paths maintain backward compatibility."""
    from backend.apps.trading_service import create_app

    # Stub the domain layer so routes exercise only the HTTP wiring.
    monkeypatch.setattr(
        "backend.domains.trading.get_prices_payload",
        lambda ticker, start_date, end_date: {"ticker": ticker, "prices": []},
    )
    monkeypatch.setattr(
        "backend.domains.trading.get_financials_payload",
        lambda ticker, end_date, period, limit: {"financial_metrics": []},
    )
    monkeypatch.setattr(
        "backend.domains.trading.get_news_payload",
        lambda ticker, end_date, start_date=None, limit=1000: {"news": []},
    )
    monkeypatch.setattr(
        "backend.domains.trading.get_market_status_payload",
        lambda: {"status": "open"},
    )

    with TestClient(create_app()) as client:
        # Test that old path patterns still work
        assert client.get("/api/prices?ticker=AAPL&start_date=2026-01-01&end_date=2026-03-01").status_code == 200
        assert client.get("/api/financials?ticker=AAPL&end_date=2026-03-01").status_code == 200
        assert client.get("/api/news?ticker=AAPL&end_date=2026-03-01").status_code == 200
        assert client.get("/api/market/status").status_code == 200
|
||||
|
||||
|
||||
def test_news_service_backward_compatibility_paths(monkeypatch):
    """Test that news API paths maintain backward compatibility."""
    from backend.apps.news_service import create_app
    from backend.apps import news_service as news_service_module

    app = create_app()
    # Replace the real market store dependency with the in-memory fake.
    app.dependency_overrides[news_service_module.get_market_store] = lambda: _FakeStore()

    monkeypatch.setattr(
        "backend.domains.news.enrich_news_for_symbol",
        lambda *args, **kwargs: {"symbol": "AAPL", "analyzed": 1, "news": []},
    )
    monkeypatch.setattr(
        "backend.domains.news.get_or_create_stock_story",
        lambda store, symbol, as_of_date: {"symbol": symbol, "as_of_date": as_of_date, "story": ""},
    )

    with TestClient(app) as client:
        # Test that old path patterns still work
        assert client.get("/api/enriched-news?ticker=AAPL&end_date=2026-03-01").status_code == 200
        assert client.get("/api/stories/AAPL?as_of_date=2026-03-01").status_code == 200
|
||||
|
||||
|
||||
def test_service_ports_match_documentation():
    """Verify each service's __main__ block binds the port documented in README/start-dev.sh.

    The previous version also contained four statements of the form
    ``assert "8000" in module.__file__ or True`` — tautologies that can never
    fail and assert nothing. They have been removed; the AST-based check below
    is the real verification.
    """
    import ast
    import inspect

    import backend.apps.agent_service as agent_service
    import backend.apps.news_service as news_service
    import backend.apps.runtime_service as runtime_service
    import backend.apps.trading_service as trading_service

    def get_main_port(module):
        # Walk the module AST for the first call carrying a literal port=
        # keyword (the uvicorn.run(...) invocation in the __main__ block).
        source = inspect.getsource(module)
        tree = ast.parse(source)
        for node in ast.walk(tree):
            if isinstance(node, ast.Call):
                for kw in node.keywords:
                    if kw.arg == "port" and isinstance(kw.value, ast.Constant):
                        return kw.value.value
        return None

    # These ports are documented in README.md and start-dev.sh.
    assert get_main_port(agent_service) == 8000
    assert get_main_port(trading_service) == 8001
    assert get_main_port(news_service) == 8002
    assert get_main_port(runtime_service) == 8003
|
||||
@@ -178,3 +178,84 @@ def test_news_service_range_explain(monkeypatch):
|
||||
|
||||
assert response.status_code == 200
|
||||
assert response.json()["result"]["news_count"] == 1
|
||||
|
||||
|
||||
def test_news_service_contract_stability():
    """Verify news service API maintains contract stability."""
    app = create_app()
    routes = {r.path: r for r in app.routes if hasattr(r, "methods")}

    # Health endpoint must exist.
    assert "/health" in routes

    read_endpoints = (
        "/api/enriched-news",
        "/api/news-for-date",
        "/api/news-timeline",
        "/api/categories",
        "/api/similar-days",
        "/api/stories/{ticker}",
        "/api/range-explain",
    )

    for path in read_endpoints:
        # Each contract endpoint is registered...
        assert path in routes
        # ...and, being a read-only service, exposed as GET.
        assert "GET" in routes[path].methods
|
||||
|
||||
|
||||
def test_news_service_enriched_news_contract(monkeypatch):
    """Test enriched news endpoint maintains response contract."""
    app = create_app()
    # Start from a clean slate before installing this test's override.
    app.dependency_overrides.clear()
    from backend.apps import news_service as news_service_module

    app.dependency_overrides[news_service_module.get_market_store] = lambda: _FakeStore()
    monkeypatch.setattr(
        "backend.domains.news.enrich_news_for_symbol",
        lambda *args, **kwargs: {"symbol": "AAPL", "analyzed": 1, "news": [{"id": "1", "title": "Test"}]},
    )

    with TestClient(app) as client:
        response = client.get(
            "/api/enriched-news",
            params={"ticker": "AAPL", "end_date": "2026-03-23"},
        )

    assert response.status_code == 200
    payload = response.json()
    # Contract: the response always carries a "news" key.
    assert "news" in payload
|
||||
|
||||
|
||||
def test_news_service_stories_contract(monkeypatch):
    """Test stories endpoint maintains response contract."""
    app = create_app()
    from backend.apps import news_service as news_service_module

    app.dependency_overrides[news_service_module.get_market_store] = lambda: _FakeStore()
    monkeypatch.setattr(
        "backend.domains.news.enrich_news_for_symbol",
        lambda *args, **kwargs: {"symbol": "AAPL", "analyzed": 1},
    )
    monkeypatch.setattr(
        "backend.domains.news.get_or_create_stock_story",
        lambda store, symbol, as_of_date: {
            "symbol": symbol,
            "as_of_date": as_of_date,
            "story": "story body",
            "source": "local",
            "headline": "Test Headline",
        },
    )

    with TestClient(app) as client:
        response = client.get(
            "/api/stories/AAPL",
            params={"as_of_date": "2026-03-23"},
        )

    assert response.status_code == 200
    payload = response.json()
    # Contract: symbol, as_of_date and story must always be present.
    assert "symbol" in payload
    assert "as_of_date" in payload
    assert "story" in payload
|
||||
|
||||
@@ -242,7 +242,6 @@ def test_runtime_cleanup_endpoint_prunes_old_runs(monkeypatch, tmp_path):
|
||||
def test_runtime_history_lists_recent_runs(monkeypatch, tmp_path):
|
||||
run_dir = tmp_path / "runs" / "20260324_120000"
|
||||
(run_dir / "state").mkdir(parents=True)
|
||||
(run_dir / "team_dashboard").mkdir(parents=True)
|
||||
(run_dir / "state" / "runtime_state.json").write_text(
|
||||
json.dumps(
|
||||
{
|
||||
@@ -256,8 +255,13 @@ def test_runtime_history_lists_recent_runs(monkeypatch, tmp_path):
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
(run_dir / "team_dashboard" / "summary.json").write_text(
|
||||
json.dumps({"totalTrades": 3, "totalAssetValue": 123456.0}),
|
||||
(run_dir / "state" / "server_state.json").write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"portfolio": {"total_value": 123456.0},
|
||||
"trades": [{}, {}, {}],
|
||||
}
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
@@ -270,6 +274,7 @@ def test_runtime_history_lists_recent_runs(monkeypatch, tmp_path):
|
||||
payload = response.json()
|
||||
assert payload["runs"][0]["run_id"] == "20260324_120000"
|
||||
assert payload["runs"][0]["total_trades"] == 3
|
||||
assert payload["runs"][0]["total_asset_value"] == 123456.0
|
||||
|
||||
|
||||
def test_restore_run_assets_copies_state(monkeypatch, tmp_path):
|
||||
@@ -278,6 +283,7 @@ def test_restore_run_assets_copies_state(monkeypatch, tmp_path):
|
||||
(source_run / "state").mkdir(parents=True)
|
||||
(source_run / "agents").mkdir(parents=True)
|
||||
(source_run / "team_dashboard" / "_internal_state.json").write_text("{}", encoding="utf-8")
|
||||
(source_run / "team_dashboard" / "summary.json").write_text("{}", encoding="utf-8")
|
||||
(source_run / "state" / "server_state.json").write_text("{}", encoding="utf-8")
|
||||
|
||||
target_run = tmp_path / "runs" / "20260324_130000"
|
||||
@@ -288,6 +294,237 @@ def test_restore_run_assets_copies_state(monkeypatch, tmp_path):
|
||||
|
||||
assert (target_run / "team_dashboard" / "_internal_state.json").exists()
|
||||
assert (target_run / "state" / "server_state.json").exists()
|
||||
assert not (target_run / "team_dashboard" / "summary.json").exists()
|
||||
|
||||
|
||||
def test_runtime_service_routes_contract_stability():
    """Verify runtime API routes maintain contract stability."""
    app = create_app()
    registered = {route.path for route in app.routes if hasattr(route, "methods")}

    expected = {
        # Core runtime lifecycle endpoints.
        "/api/runtime/start",
        "/api/runtime/stop",
        "/api/runtime/restart",
        "/api/runtime/current",
        # Configuration endpoints.
        "/api/runtime/config",
        # Query endpoints.
        "/api/runtime/agents",
        "/api/runtime/events",
        "/api/runtime/history",
        "/api/runtime/context",
        "/api/runtime/logs",
        # Gateway endpoints.
        "/api/runtime/gateway/status",
        "/api/runtime/gateway/port",
        # Maintenance endpoints.
        "/api/runtime/cleanup",
    }

    for path in sorted(expected):
        assert path in registered
|
||||
|
||||
|
||||
def test_runtime_service_start_stop_lifecycle_contract(monkeypatch, tmp_path):
    """Test the start/stop lifecycle maintains expected contract."""
    run_dir = tmp_path / "runs" / "test_run"
    state_dir = run_dir / "state"
    state_dir.mkdir(parents=True)
    # Persist runtime_state.json so /api/runtime/current can resolve the
    # context after the runtime has been stopped.
    persisted_state = {
        "context": {
            "config_name": "test_run",
            "run_dir": str(run_dir),
            "bootstrap_values": {"tickers": ["AAPL", "MSFT"]},
        }
    }
    (state_dir / "runtime_state.json").write_text(
        json.dumps(persisted_state), encoding="utf-8"
    )

    class _DummyManager:
        # Stand-in for TradingRuntimeManager; records constructor args and
        # fabricates a minimal context on prepare_run().
        def __init__(self, config_name, run_dir, bootstrap):
            self.config_name = config_name
            self.run_dir = Path(run_dir)
            self.bootstrap = bootstrap
            self.context = None

        def prepare_run(self):
            ctx_attrs = {
                "config_name": self.config_name,
                "run_dir": self.run_dir,
                "bootstrap_values": self.bootstrap,
            }
            self.context = type("Ctx", (), ctx_attrs)()
            return self.context

    class _DummyProcess:
        # poll() returning None signals "process still running".
        def poll(self):
            return None

    monkeypatch.setattr(runtime_module, "PROJECT_ROOT", tmp_path)
    monkeypatch.setattr(
        runtime_module,
        "_find_available_port",
        lambda start_port=8765, max_port=9000: 8765,
    )
    monkeypatch.setattr(
        runtime_module, "_start_gateway_process", lambda **kwargs: _DummyProcess()
    )
    monkeypatch.setattr(runtime_module, "_stop_gateway", lambda: True)
    monkeypatch.setattr("backend.runtime.manager.TradingRuntimeManager", _DummyManager)
    runtime_module.get_runtime_state().gateway_process = None

    start_request = {
        "launch_mode": "fresh",
        "tickers": ["AAPL", "MSFT"],
        "schedule_mode": "daily",
        "interval_minutes": 60,
        "trigger_time": "09:30",
        "max_comm_cycles": 2,
        "initial_cash": 100000.0,
        "margin_requirement": 0.0,
        "enable_memory": False,
        "mode": "live",
        "poll_interval": 10,
    }

    with TestClient(create_app()) as client:
        # Start runtime
        start_response = client.post("/api/runtime/start", json=start_request)

        assert start_response.status_code == 200
        start_payload = start_response.json()
        for key in ("run_id", "status", "run_dir", "gateway_port", "message"):
            assert key in start_payload
        assert start_payload["status"] == "started"

        # Inspect the current runtime while it is running
        current_response = client.get("/api/runtime/current")
        assert current_response.status_code == 200
        current_payload = current_response.json()
        for key in ("run_id", "run_dir", "is_running", "gateway_port", "bootstrap"):
            assert key in current_payload

        # Stop runtime
        stop_response = client.post("/api/runtime/stop?force=true")
        assert stop_response.status_code == 200
        stop_payload = stop_response.json()
        assert "status" in stop_payload
        assert "message" in stop_payload
        assert stop_payload["status"] == "stopped"
|
||||
|
||||
|
||||
def test_runtime_service_agents_events_contract(monkeypatch, tmp_path):
    """Test agents and events endpoints maintain contract."""
    run_dir = tmp_path / "runs" / "demo"
    state_dir = run_dir / "state"
    state_dir.mkdir(parents=True)
    # Seed runtime state with two agents and one event so both query
    # endpoints have data to surface.
    seeded_state = {
        "context": {
            "config_name": "demo",
            "run_dir": str(run_dir),
            "bootstrap_values": {"tickers": ["AAPL"]},
        },
        "agents": [
            {
                "agent_id": "fundamentals_analyst",
                "status": "idle",
                "last_session": "2026-03-30",
                "last_updated": "2026-03-30T10:00:00",
            },
            {
                "agent_id": "technical_analyst",
                "status": "analyzing",
                "last_session": None,
                "last_updated": "2026-03-30T10:05:00",
            },
        ],
        "events": [
            {
                "timestamp": "2026-03-30T10:00:00",
                "event": "agent_registered",
                "details": {"agent_id": "fundamentals_analyst"},
                "session": "2026-03-30",
            }
        ],
    }
    (state_dir / "runtime_state.json").write_text(
        json.dumps(seeded_state), encoding="utf-8"
    )

    monkeypatch.setattr(runtime_module, "PROJECT_ROOT", tmp_path)
    monkeypatch.setattr(runtime_module, "_is_gateway_running", lambda: True)
    runtime_module.get_runtime_state().gateway_port = 8765

    with TestClient(create_app()) as client:
        # Agents endpoint
        agents_response = client.get("/api/runtime/agents")
        assert agents_response.status_code == 200
        agents_payload = agents_response.json()
        assert "agents" in agents_payload
        assert len(agents_payload["agents"]) == 2
        first_agent = agents_payload["agents"][0]
        for key in ("agent_id", "status", "last_session", "last_updated"):
            assert key in first_agent

        # Events endpoint
        events_response = client.get("/api/runtime/events")
        assert events_response.status_code == 200
        events_payload = events_response.json()
        assert "events" in events_payload
        assert len(events_payload["events"]) == 1
        first_event = events_payload["events"][0]
        for key in ("timestamp", "event", "details", "session"):
            assert key in first_event
|
||||
|
||||
|
||||
def test_runtime_service_gateway_status_contract(monkeypatch, tmp_path):
    """Test gateway status endpoint maintains contract."""
    run_dir = tmp_path / "runs" / "demo"
    state_dir = run_dir / "state"
    state_dir.mkdir(parents=True)
    # Minimal persisted context — gateway status resolves run_id from it.
    persisted = {
        "context": {
            "config_name": "demo",
            "run_dir": str(run_dir),
            "bootstrap_values": {},
        }
    }
    (state_dir / "runtime_state.json").write_text(
        json.dumps(persisted), encoding="utf-8"
    )

    monkeypatch.setattr(runtime_module, "PROJECT_ROOT", tmp_path)
    monkeypatch.setattr(runtime_module, "_is_gateway_running", lambda: True)
    runtime_module.get_runtime_state().gateway_port = 8765

    with TestClient(create_app()) as client:
        response = client.get("/api/runtime/gateway/status")
        assert response.status_code == 200
        payload = response.json()
        for key in ("is_running", "port", "run_id"):
            assert key in payload
        assert payload["is_running"] is True
        assert payload["port"] == 8765
        assert payload["run_id"] == "demo"
|
||||
|
||||
|
||||
def test_start_runtime_restore_reuses_historical_run_id(monkeypatch, tmp_path):
|
||||
|
||||
@@ -200,6 +200,179 @@ def test_trading_service_market_cap_endpoint(monkeypatch):
|
||||
}
|
||||
|
||||
|
||||
def test_trading_service_contract_stability():
    """Verify trading service API maintains contract stability."""
    app = create_app()
    routes = {route.path: route for route in app.routes if hasattr(route, "methods")}

    # Health endpoint
    assert "/health" in routes

    # Trading data endpoints must all exist and — this being a read-only
    # service — each must be exposed as GET.
    data_paths = (
        "/api/prices",
        "/api/financials",
        "/api/news",
        "/api/insider-trades",
        "/api/market/status",
        "/api/market-cap",
        "/api/line-items",
    )
    for path in data_paths:
        assert path in routes
        assert "GET" in routes[path].methods
|
||||
|
||||
|
||||
def test_trading_service_prices_contract(monkeypatch):
    """Test prices endpoint maintains response contract."""

    def _fake_prices_payload(ticker, start_date, end_date):
        # One representative candle is enough to exercise the schema.
        sample = Price(
            open=1.0,
            close=2.0,
            high=2.5,
            low=0.5,
            volume=100,
            time="2026-03-20",
        )
        return {"ticker": ticker, "prices": [sample]}

    monkeypatch.setattr(
        "backend.domains.trading.get_prices_payload", _fake_prices_payload
    )

    query = {
        "ticker": "AAPL",
        "start_date": "2026-03-01",
        "end_date": "2026-03-20",
    }
    with TestClient(create_app()) as client:
        response = client.get("/api/prices", params=query)

        assert response.status_code == 200
        payload = response.json()
        assert "ticker" in payload
        assert "prices" in payload
        assert isinstance(payload["prices"], list)
        if payload["prices"]:
            first_price = payload["prices"][0]
            for key in ("open", "close", "high", "low", "volume", "time"):
                assert key in first_price
|
||||
|
||||
|
||||
def test_trading_service_financials_contract(monkeypatch):
    """Test financials endpoint maintains response contract."""
    # Every optional metric is explicitly None; only the identifying
    # fields carry real values.
    none_metric_fields = (
        "enterprise_value",
        "price_to_earnings_ratio",
        "price_to_book_ratio",
        "price_to_sales_ratio",
        "enterprise_value_to_ebitda_ratio",
        "enterprise_value_to_revenue_ratio",
        "free_cash_flow_yield",
        "peg_ratio",
        "gross_margin",
        "operating_margin",
        "net_margin",
        "return_on_equity",
        "return_on_assets",
        "return_on_invested_capital",
        "asset_turnover",
        "inventory_turnover",
        "receivables_turnover",
        "days_sales_outstanding",
        "operating_cycle",
        "working_capital_turnover",
        "current_ratio",
        "quick_ratio",
        "cash_ratio",
        "operating_cash_flow_ratio",
        "debt_to_equity",
        "debt_to_assets",
        "interest_coverage",
        "revenue_growth",
        "earnings_growth",
        "book_value_growth",
        "earnings_per_share_growth",
        "free_cash_flow_growth",
        "operating_income_growth",
        "ebitda_growth",
        "payout_ratio",
        "earnings_per_share",
        "book_value_per_share",
        "free_cash_flow_per_share",
    )

    def _fake_financials_payload(ticker, end_date, period, limit):
        metric_kwargs = dict.fromkeys(none_metric_fields)
        metric_kwargs.update(
            ticker=ticker,
            report_period=end_date,
            period=period,
            currency="USD",
            market_cap=123.0,
        )
        return {"financial_metrics": [FinancialMetrics(**metric_kwargs)]}

    monkeypatch.setattr(
        "backend.domains.trading.get_financials_payload", _fake_financials_payload
    )

    with TestClient(create_app()) as client:
        response = client.get(
            "/api/financials",
            params={"ticker": "AAPL", "end_date": "2026-03-20"},
        )

        assert response.status_code == 200
        payload = response.json()
        assert "financial_metrics" in payload
        assert isinstance(payload["financial_metrics"], list)
|
||||
|
||||
|
||||
def test_trading_service_market_status_contract(monkeypatch):
    """Test market status endpoint maintains response contract."""
    fake_status = {"status": "open", "status_text": "Open", "next_open": "09:30"}
    monkeypatch.setattr(
        "backend.domains.trading.get_market_status_payload",
        lambda: fake_status,
    )

    with TestClient(create_app()) as client:
        response = client.get("/api/market/status")

        assert response.status_code == 200
        payload = response.json()
        assert "status" in payload
|
||||
|
||||
|
||||
def test_trading_service_market_cap_contract(monkeypatch):
    """Test market cap endpoint maintains response contract."""

    def _fake_market_cap_payload(ticker, end_date):
        return {"ticker": ticker, "end_date": end_date, "market_cap": 3.5e12}

    monkeypatch.setattr(
        "backend.domains.trading.get_market_cap_payload", _fake_market_cap_payload
    )

    with TestClient(create_app()) as client:
        response = client.get(
            "/api/market-cap",
            params={"ticker": "AAPL", "end_date": "2026-03-20"},
        )

        assert response.status_code == 200
        payload = response.json()
        for key in ("ticker", "end_date", "market_cap"):
            assert key in payload
|
||||
|
||||
|
||||
def test_trading_service_line_items_endpoint(monkeypatch):
|
||||
monkeypatch.setattr(
|
||||
"backend.domains.trading.get_line_items_payload",
|
||||
|
||||
Reference in New Issue
Block a user