Compare commits
11 Commits
2027635efe ... feature/ag

| SHA1 | Author | Date |
|---|---|---|
| dfc8fda187 | | |
| aae4bc7d40 | | |
| 11849208ed | | |
| 62c7341cf6 | | |
| 80ce63da5a | | |
| a9d863073f | | |
| 4ea8fc4c32 | | |
| 771de8c49c | | |
| a399384e07 | | |
| ecfbd87244 | | |
| dc0b250adc | | |
8 changes: .gitignore (vendored)

@@ -51,11 +51,19 @@ node_modules
outputs/
/production/
/smoke_test/
/frontend/dist/
/frontend/test-results/

# Local tooling state
.omc/
/.codex/
/.codex
/.pydeps/
/referance/
/.pids/
/.pytest_cache/
/.ruff_cache/
/evotraders.egg-info/

# Run outputs
/runs/
@@ -1 +0,0 @@
-73343
@@ -1 +0,0 @@
-73348
@@ -1 +0,0 @@
-66939
@@ -1 +0,0 @@
-73345
@@ -1 +0,0 @@
-73347
@@ -1 +0,0 @@
-73346
@@ -1 +0,0 @@
-73344
@@ -115,7 +115,8 @@ npm run test # Vitest unit tests

| File | Responsibility |
|------|------|
-| `pipeline.py` | TradingPipeline - core orchestrator (analysis → communication → decision → execution → evaluation) |
+| `pipeline.py` | TradingPipeline - core orchestrator (analysis → communication → decision → execution → evaluation), with checkpoint/resume support |
+| `apo.py` | PolicyOptimizer - (APO) automatic policy optimizer that rewrites an agent's POLICY.md based on P&L |
| `pipeline_runner.py` | Standalone execution triggered via the REST API, 5-phase startup |
| `scheduler.py` | BacktestScheduler, Scheduler - backtest/live scheduling |
| `state_sync.py` | StateSync - state synchronization and broadcast |

@@ -168,7 +169,8 @@ backend/
│ └── models.py # ProcessRun, ProcessRunState
│
├── core/ # pipeline execution
-│ ├── pipeline.py # TradingPipeline (core orchestrator)
+│ ├── pipeline.py # TradingPipeline (core orchestrator, resumable)
+│ ├── apo.py # PolicyOptimizer (automatic tuning)
│ ├── pipeline_runner.py # standalone pipeline execution
│ ├── scheduler.py # scheduler
│ └── state_sync.py # state sync
43 changes: README.md

@@ -21,8 +21,11 @@ The repository name still uses `evotraders`, but the product-facing branding now

**Multi-agent trading team**
Six roles collaborate like a real desk: four specialist analysts (fundamentals, technical, sentiment, valuation), one portfolio manager, and one risk manager.

-**Continuous learning**
-Agents can persist long-term memory with ReMe, reflect after each cycle, and evolve their decision patterns over time.
+**Continuous learning & Evolution**
+Agents persist long-term memory with ReMe and reflect after each cycle. The **Autonomous Policy Optimizer (APO)** automatically tunes agent operational policies (`POLICY.md`) based on daily P&L feedback to correct recurring mistakes.
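The APO mechanism is only named here; as a rough sketch of the loop that description implies (all names below are hypothetical, not the actual `apo.py` API):

```python
# Minimal sketch of the APO idea described above: read daily P&L and,
# if performance regressed, ask an LLM to revise the agent's POLICY.md.
# Every name here is a stand-in, not the real apo.py implementation.
from pathlib import Path

def optimize_policy(agent_dir: Path, daily_pnl: float, llm_revise) -> None:
    policy_path = agent_dir / "POLICY.md"
    policy = policy_path.read_text(encoding="utf-8")
    if daily_pnl < 0:
        # llm_revise stands in for whatever model call APO actually uses.
        revised = llm_revise(policy, feedback=f"daily P&L was {daily_pnl:.2f}")
        policy_path.write_text(revised, encoding="utf-8")
```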
+
+**Robust execution with recovery**
+The trading pipeline supports **phase-based checkpointing**. If a run is interrupted, it can resume from the last successful phase (Analysis, Risk, Discussion, Decision, Execution, or Settlement), ensuring resilience in production.
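A minimal sketch of what phase-based checkpointing can look like, assuming a per-run `checkpoint.json`; the real `pipeline.py` layout is not shown here:

```python
# Hedged sketch: phase names come from the README above; the checkpoint
# file format and helper names are assumptions for illustration only.
import json
from pathlib import Path

PHASES = ["analysis", "risk", "discussion", "decision", "execution", "settlement"]

def resume_index(run_dir: Path) -> int:
    """Index of the first phase that has not completed yet."""
    ckpt = run_dir / "checkpoint.json"
    if not ckpt.exists():
        return 0
    return len(json.loads(ckpt.read_text()).get("completed", []))

def mark_done(run_dir: Path, phase: str) -> None:
    """Append a completed phase so an interrupted run can resume."""
    ckpt = run_dir / "checkpoint.json"
    done = json.loads(ckpt.read_text()).get("completed", []) if ckpt.exists() else []
    done.append(phase)
    ckpt.write_text(json.dumps({"completed": done}))
```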

**Backtest and live modes**
The same runtime model supports historical simulation and live execution with real-time market data.

@@ -68,6 +71,7 @@ Runtime state is stored in `runs/<run_id>/` — this is the **runtime source of

### Documentation

- [docs/README.md](./docs/README.md) — documentation index and reading order
- [docs/current-architecture.md](./docs/current-architecture.md) — canonical architecture facts
- [services/README.md](./services/README.md) — service boundaries and migration details
- [docs/current-architecture.excalidraw](./docs/current-architecture.excalidraw) — visual diagram

@@ -84,15 +88,11 @@ Runtime state is stored in `runs/<run_id>/` — this is the **runtime source of

# clone this repository, then:
cd evotraders

# backend runtime dependencies
uv pip install -r requirements.txt

# install package entrypoint in editable mode
uv pip install -e .

-# optional
+# optional dev dependencies
# uv pip install -e ".[dev]"
# pip install -e .
```

Frontend dependencies:

@@ -103,7 +103,7 @@ npm ci

cd ..
```

-Production deployment should prefer `requirements.txt` for backend and `npm ci` for frontend so the pulled environment matches the checked-in lockfiles and version pins.
+Production deployment should prefer the checked-in Python package metadata in `pyproject.toml` for backend installation and `npm ci` for frontend so the pulled environment matches the checked-in dependency declarations and lockfiles.

### 2. Configure environment

@@ -206,13 +206,13 @@ Recommended local development flow:

./start-dev.sh
```

-This starts:
+This starts directly from the script:

- `agent_service` at `http://localhost:8000`
- `trading_service` at `http://localhost:8001`
- `news_service` at `http://localhost:8002`
- `runtime_service` at `http://localhost:8003`
-- gateway WebSocket at `ws://localhost:8765`
+- gateway WebSocket at `ws://localhost:8765` via `runtime_service` managed startup

Then start the frontend in another terminal:

@@ -229,31 +229,34 @@ python -m uvicorn backend.apps.agent_service:app --host 0.0.0.0 --port 8000 --re

python -m uvicorn backend.apps.trading_service:app --host 0.0.0.0 --port 8001 --reload
python -m uvicorn backend.apps.news_service:app --host 0.0.0.0 --port 8002 --reload
python -m uvicorn backend.apps.runtime_service:app --host 0.0.0.0 --port 8003 --reload
+# compatibility gateway path, not the recommended primary dev entrypoint
python -m backend.main --mode live --host 0.0.0.0 --port 8765

+# then create a runtime so runtime_service can spawn the Gateway subprocess
+curl -X POST http://localhost:8003/api/runtime/start \
+  -H "Content-Type: application/json" \
+  -d '{"launch_mode":"fresh","tickers":["AAPL","MSFT"],"mode":"live"}'
```

### 4. Run backtest or live mode

Backtest:

```bash
-python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01
+python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01 --enable-memory
+curl -X POST http://localhost:8003/api/runtime/start \
+  -H "Content-Type: application/json" \
+  -d '{"launch_mode":"fresh","mode":"backtest","tickers":["AAPL","MSFT"],"start_date":"2025-11-01","end_date":"2025-12-01"}'
```

Live:

```bash
-python backend/main.py --mode live --config-name live
+python backend/main.py --mode live --config-name live --enable-memory
python backend/main.py --mode live --config-name live --interval-minutes 60
python backend/main.py --mode live --config-name live --trigger-time 22:30
+curl -X POST http://localhost:8003/api/runtime/start \
+  -H "Content-Type: application/json" \
+  -d '{"launch_mode":"fresh","mode":"live","tickers":["AAPL","MSFT"]}'
```

Help:

```bash
-python backend/main.py --help
+python backend/main.py --help # compatibility standalone entrypoint only
```

### Offline backtest data

@@ -311,7 +314,7 @@ If these are not set, the frontend falls back to its local defaults and compatib

```text
Market data -> independent analyst work -> team communication -> portfolio decision ->
-risk review -> execution/settlement -> reflection/memory update
+risk review -> execution/settlement -> reflection/memory update -> APO policy tuning
```

The runtime manager also tracks:
36 changes: README_zh.md

@@ -66,7 +66,9 @@ frontend -> runtime_service/control APIs -> gateway/runtime manager -> market se

### Documentation

- [docs/README.md](./docs/README.md) — documentation index and reading order
- [docs/current-architecture.md](./docs/current-architecture.md) — canonical architecture facts
+- [docs/project-layout.md](./docs/project-layout.md) — current directory layout and responsibilities
- [services/README.md](./services/README.md) — service boundaries and migration details
- [docs/current-architecture.excalidraw](./docs/current-architecture.excalidraw) — architecture diagram
+- [docs/development-roadmap.md](./docs/development-roadmap.md) — next-step execution plan

@@ -82,15 +84,11 @@ frontend -> runtime_service/control APIs -> gateway/runtime manager -> market se

# clone the repository, then enter the project directory
cd evotraders

# install backend runtime dependencies
uv pip install -r requirements.txt

# install the package entrypoint (editable mode)
uv pip install -e .

-# optional
+# optional dev dependencies
# uv pip install -e ".[dev]"
# pip install -e .
```

Frontend dependencies:

@@ -101,7 +99,7 @@ npm ci

cd ..
```

-For production deployment, the backend should install from `requirements.txt` and the frontend should use `npm ci`, so the provisioned environment strictly follows the dependency versions locked in the repository.
+For production deployment, the backend should install from the package metadata declared in `pyproject.toml` and the frontend should use `npm ci`, so the provisioned environment strictly follows the dependencies declared and locked in the repository.

### 2. Configure environment variables

@@ -178,7 +176,7 @@ python3 scripts/smoke_evo_runtime.py --agent-id fundamentals_analyst

- `trading_service`: `http://localhost:8001`
- `news_service`: `http://localhost:8002`
- `runtime_service`: `http://localhost:8003`
-- gateway WebSocket: `ws://localhost:8765`
+- gateway WebSocket: `ws://localhost:8765`, started and managed by `runtime_service`

Then start the frontend in another terminal:

@@ -195,8 +193,11 @@ python -m uvicorn backend.apps.agent_service:app --host 0.0.0.0 --port 8000 --re

python -m uvicorn backend.apps.trading_service:app --host 0.0.0.0 --port 8001 --reload
python -m uvicorn backend.apps.news_service:app --host 0.0.0.0 --port 8002 --reload
python -m uvicorn backend.apps.runtime_service:app --host 0.0.0.0 --port 8003 --reload
+# compatibility gateway path, not the recommended primary dev entrypoint
python -m backend.main --mode live --host 0.0.0.0 --port 8765

+# then create a runtime via runtime_service, which spawns the Gateway subprocess
+curl -X POST http://localhost:8003/api/runtime/start \
+  -H "Content-Type: application/json" \
+  -d '{"launch_mode":"fresh","tickers":["AAPL","MSFT"],"mode":"live"}'
```

The `production` used by the deployment scripts in this repository is just an example run label and should no longer be read as

@@ -207,24 +208,19 @@ python -m backend.main --mode live --host 0.0.0.0 --port 8765

Backtest:

```bash
-python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01
+python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01 --enable-memory
+curl -X POST http://localhost:8003/api/runtime/start \
+  -H "Content-Type: application/json" \
+  -d '{"launch_mode":"fresh","mode":"backtest","tickers":["AAPL","MSFT"],"start_date":"2025-11-01","end_date":"2025-12-01"}'
```

Live:

```bash
-python backend/main.py --mode live --config-name live
+python backend/main.py --mode live --config-name live --enable-memory
python backend/main.py --mode live --config-name live --interval-minutes 60
python backend/main.py --mode live --config-name live --trigger-time 22:30
+curl -X POST http://localhost:8003/api/runtime/start \
+  -H "Content-Type: application/json" \
+  -d '{"launch_mode":"fresh","mode":"live","tickers":["AAPL","MSFT"]}'
```

Help:

```bash
python backend/main.py --help
```

### Offline backtest data

If you just want to try backtesting quickly without relying on external market-data APIs, you can download the offline data package and extract it into `backend/data`:
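The package link itself is not shown in this excerpt; a sketch of the download step with a placeholder URL (hypothetical, substitute the real link):

```bash
# Hypothetical URL — replace with the actual offline data package link.
curl -L -o backtest_data.zip "https://example.com/evotraders-backtest-data.zip"
unzip backtest_data.zip -d backend/data
```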
@@ -310,11 +310,12 @@ class EvoAgent(ToolGuardMixin, ReActAgent):

        )
        logger.debug("Registered workspace watch hook")

-    async def _reasoning(self, **kwargs) -> Msg:
+    async def _reasoning(self, tool_choice: Optional[str] = None, **kwargs) -> Msg:
        """Override reasoning to execute pre-reasoning hooks.

        Args:
-            **kwargs: Arguments for reasoning
+            tool_choice: Optional tool choice for structured output
+            **kwargs: Additional arguments for reasoning

        Returns:
            Response message

@@ -327,7 +328,7 @@ class EvoAgent(ToolGuardMixin, ReActAgent):

        )

        # Call parent (which may be ToolGuardMixin's _reasoning)
-        return await super()._reasoning(**kwargs)
+        return await super()._reasoning(tool_choice=tool_choice, **kwargs)

    def reload_runtime_assets(self, active_skill_dirs: Optional[List[Path]] = None) -> None:
        """Reload toolkit and system prompt from current run assets.

@@ -579,7 +580,7 @@ class EvoAgent(ToolGuardMixin, ReActAgent):

            return

        try:
-            self._messenger = AgentMessenger(agent_id=self.agent_id)
+            self._messenger = AgentMessenger()
            self._task_delegator = TaskDelegator(agent=self)
            logger.debug(
                "Team infrastructure initialized for agent: %s",
@@ -12,7 +12,7 @@ from __future__ import annotations

import asyncio
import json
import logging
-from datetime import UTC, datetime
+from datetime import datetime, timezone
from enum import Enum

from typing import Any, Callable, Dict, Iterable, List, Optional, Set

@@ -78,7 +78,7 @@ class ApprovalRecord:

        self.session_id = session_id
        self.status = ApprovalStatus.PENDING
        self.findings = findings or []
-        self.created_at = datetime.now(UTC)
+        self.created_at = datetime.now(timezone.utc)
        self.resolved_at: Optional[datetime] = None
        self.resolved_by: Optional[str] = None
        self.metadata: Dict[str, Any] = {}

@@ -163,7 +163,7 @@ class ToolGuardStore:

            return record

        record.status = status
-        record.resolved_at = datetime.now(UTC)
+        record.resolved_at = datetime.now(timezone.utc)
        record.resolved_by = resolved_by
        if notify_request and record.pending_request:
            if status == ApprovalStatus.APPROVED:
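This swap reads as a version-compatibility fix: `datetime.UTC` exists only on Python 3.11+, while `timezone.utc` works on all supported Python 3 versions. For example:

```python
from datetime import datetime, timezone

# timezone.utc is available on every supported Python 3 release;
# datetime.UTC (what the old import used) was only added in Python 3.11.
now = datetime.now(timezone.utc)
print(now.isoformat())
```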
372 changes: backend/agents/dynamic_team_types.py (new file)

@@ -0,0 +1,372 @@
# -*- coding: utf-8 -*-
"""Dynamic Team Types - Core data types for PM-driven analyst team management.

This module provides data structures for:
- Analyst persona definitions (custom analyst types)
- Analyst creation configuration (custom SOUL.md, AGENTS.md, etc.)
- Dynamic team runtime state tracking

These types enable the Portfolio Manager to dynamically create, clone, and manage
analyst agents with custom configurations beyond the predefined 4 analyst types.
"""
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Optional, Dict, Any, List
from datetime import datetime


@dataclass
class AnalystPersona:
    """Analyst role definition - extends or replaces personas.yaml entries.

    Defines the identity, focus areas, and characteristics of an analyst type.
    Can be used to create entirely new analyst types at runtime.

    Attributes:
        name: Display name for the analyst (e.g., "期权策略分析师")
        focus: List of focus areas (e.g., ["期权定价", "波动率交易"])
        description: Detailed description of the analyst's role and expertise
        preferred_tools: Optional list of preferred tool types or categories
        icon: Optional icon identifier for frontend display
    """
    name: str
    focus: List[str]
    description: str
    preferred_tools: Optional[List[str]] = None
    icon: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "name": self.name,
            "focus": self.focus,
            "description": self.description,
            "preferred_tools": self.preferred_tools,
            "icon": self.icon,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> AnalystPersona:
        """Create from dictionary."""
        return cls(
            name=data["name"],
            focus=data.get("focus", []),
            description=data.get("description", ""),
            preferred_tools=data.get("preferred_tools"),
            icon=data.get("icon"),
        )


@dataclass
class AnalystConfig:
    """Complete configuration for dynamically creating an analyst.

    This dataclass allows the PM to specify all aspects of analyst creation,
    including custom workspace files, model overrides, and skill selections.

    Attributes:
        persona: Complete persona definition (if creating custom type)
        analyst_type: Reference to predefined type (e.g., "technical_analyst")
        soul_md: Custom SOUL.md content (overrides default generation)
        agents_md: Custom AGENTS.md content (overrides default generation)
        profile_md: Custom PROFILE.md content (overrides default generation)
        skills: List of skill IDs to enable for this analyst
        model_name: Override default model for this analyst
        memory_config: Custom memory system configuration
        tags: Classification tags (e.g., ["options", "derivatives"])
        parent_id: If cloned, the source analyst ID
    """
    # Identity configuration
    persona: Optional[AnalystPersona] = None
    analyst_type: Optional[str] = None  # Reference to predefined type

    # Workspace file contents (override default generation)
    soul_md: Optional[str] = None
    agents_md: Optional[str] = None
    profile_md: Optional[str] = None
    bootstrap_md: Optional[str] = None

    # Runtime configuration
    skills: Optional[List[str]] = field(default_factory=list)
    model_name: Optional[str] = None
    memory_config: Optional[Dict[str, Any]] = field(default_factory=dict)

    # Metadata
    tags: Optional[List[str]] = field(default_factory=list)
    parent_id: Optional[str] = None  # For clone tracking

    def __post_init__(self):
        """Initialize default collections."""
        if self.skills is None:
            self.skills = []
        if self.memory_config is None:
            self.memory_config = {}
        if self.tags is None:
            self.tags = []

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "persona": self.persona.to_dict() if self.persona else None,
            "analyst_type": self.analyst_type,
            "soul_md": self.soul_md,
            "agents_md": self.agents_md,
            "profile_md": self.profile_md,
            "bootstrap_md": self.bootstrap_md,
            "skills": self.skills,
            "model_name": self.model_name,
            "memory_config": self.memory_config,
            "tags": self.tags,
            "parent_id": self.parent_id,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> AnalystConfig:
        """Create from dictionary."""
        persona_data = data.get("persona")
        return cls(
            persona=AnalystPersona.from_dict(persona_data) if persona_data else None,
            analyst_type=data.get("analyst_type"),
            soul_md=data.get("soul_md"),
            agents_md=data.get("agents_md"),
            profile_md=data.get("profile_md"),
            bootstrap_md=data.get("bootstrap_md"),
            skills=data.get("skills", []),
            model_name=data.get("model_name"),
            memory_config=data.get("memory_config", {}),
            tags=data.get("tags", []),
            parent_id=data.get("parent_id"),
        )

    def get_effective_analyst_type(self) -> Optional[str]:
        """Get the effective analyst type for tool selection.

        Returns analyst_type if set, otherwise derives from persona name.
        """
        if self.analyst_type:
            return self.analyst_type
        if self.persona:
            # Derive type ID from persona name (e.g., "期权策略分析师" -> "options_strategist")
            return self._derive_type_id(self.persona.name)
        return None

    @staticmethod
    def _derive_type_id(name: str) -> str:
        """Derive a type ID from a display name."""
        import re
        # Convert Chinese or mixed names to snake_case
        # Remove special characters, keep alphanumeric and spaces
        cleaned = re.sub(r'[^\w\s]', '', name)
        # Convert to lowercase and replace spaces with underscores
        return cleaned.lower().strip().replace(' ', '_')


@dataclass
class DynamicAnalystInstance:
    """Runtime information about a dynamically created analyst.

    Tracks the creation metadata and current state of a dynamic analyst.

    Attributes:
        agent_id: Unique identifier for this analyst instance
        config: The configuration used to create this analyst
        created_at: Timestamp when the analyst was created
        created_by: Identifier of the agent that created this analyst (usually PM)
        status: Current status (active, paused, removed)
    """
    agent_id: str
    config: AnalystConfig
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())
    created_by: str = "portfolio_manager"
    status: str = "active"  # active, paused, removed

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "agent_id": self.agent_id,
            "config": self.config.to_dict(),
            "created_at": self.created_at,
            "created_by": self.created_by,
            "status": self.status,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> DynamicAnalystInstance:
        """Create from dictionary."""
        return cls(
            agent_id=data["agent_id"],
            config=AnalystConfig.from_dict(data.get("config", {})),
            created_at=data.get("created_at", datetime.now().isoformat()),
            created_by=data.get("created_by", "portfolio_manager"),
            status=data.get("status", "active"),
        )


@dataclass
class DynamicTeamState:
    """Complete runtime state for dynamic analyst team management.

    This state is persisted alongside TEAM_PIPELINE.yaml and tracks:
    - Custom analyst types registered at runtime
    - All dynamically created analyst instances
    - Configuration snapshots for cloning

    Attributes:
        run_id: The run configuration this state belongs to
        registered_types: Runtime-registered analyst type definitions
        instances: Dynamically created analyst instances
        version: State format version for migration handling
    """
    run_id: str
    registered_types: Dict[str, AnalystPersona] = field(default_factory=dict)
    instances: Dict[str, DynamicAnalystInstance] = field(default_factory=dict)
    version: int = 1

    def register_type(self, type_id: str, persona: AnalystPersona) -> bool:
        """Register a new analyst type.

        Returns:
            True if registered, False if type_id already exists
        """
        if type_id in self.registered_types:
            return False
        self.registered_types[type_id] = persona
        return True

    def add_instance(self, instance: DynamicAnalystInstance) -> None:
        """Add a new analyst instance."""
        self.instances[instance.agent_id] = instance

    def remove_instance(self, agent_id: str) -> bool:
        """Mark an instance as removed.

        Returns:
            True if instance was found and removed
        """
        if agent_id in self.instances:
            self.instances[agent_id].status = "removed"
            return True
        return False

    def get_active_instances(self) -> List[DynamicAnalystInstance]:
        """Get all active (non-removed) analyst instances."""
        return [
            inst for inst in self.instances.values()
            if inst.status == "active"
        ]

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "run_id": self.run_id,
            "registered_types": {
                k: v.to_dict() for k, v in self.registered_types.items()
            },
            "instances": {
                k: v.to_dict() for k, v in self.instances.items()
            },
            "version": self.version,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> DynamicTeamState:
        """Create from dictionary."""
        registered_types = {
            k: AnalystPersona.from_dict(v)
            for k, v in data.get("registered_types", {}).items()
        }
        instances = {
            k: DynamicAnalystInstance.from_dict(v)
            for k, v in data.get("instances", {}).items()
        }
        return cls(
            run_id=data.get("run_id", "unknown"),
            registered_types=registered_types,
            instances=instances,
            version=data.get("version", 1),
        )


@dataclass
class CreateAnalystResult:
    """Result of creating a dynamic analyst.

    Attributes:
        success: Whether creation was successful
        agent_id: The ID of the created analyst (if successful)
        message: Human-readable result message
        error: Error details (if failed)
    """
    success: bool
    agent_id: Optional[str] = None
    message: str = ""
    error: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for API responses."""
        return {
            "success": self.success,
            "agent_id": self.agent_id,
            "message": self.message,
            "error": self.error,
        }


@dataclass
class CloneAnalystRequest:
    """Request to clone an existing analyst.

    Attributes:
        source_id: ID of the analyst to clone
        new_id: ID for the new analyst
        config_overrides: Configuration fields to override
    """
    source_id: str
    new_id: str
    config_overrides: Optional[Dict[str, Any]] = field(default_factory=dict)

    def __post_init__(self):
        if self.config_overrides is None:
            self.config_overrides = {}


@dataclass
class AnalystTypeInfo:
    """Information about an available analyst type.

    Used for listing all available types (predefined + runtime-registered).

    Attributes:
        type_id: Unique identifier for this type
        name: Display name
        description: Type description
        is_builtin: Whether this is a built-in type or runtime-registered
        source: Source of this type (e.g., "constants", "runtime", "config")
    """
    type_id: str
    name: str
    description: str
    is_builtin: bool
    source: str

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for API responses."""
        return {
            "type_id": self.type_id,
            "name": self.name,
            "description": self.description,
            "is_builtin": self.is_builtin,
            "source": self.source,
        }


__all__ = [
    "AnalystPersona",
    "AnalystConfig",
    "DynamicAnalystInstance",
    "DynamicTeamState",
    "CreateAnalystResult",
    "CloneAnalystRequest",
    "AnalystTypeInfo",
]
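A quick usage sketch of the types above (the IDs and values are hypothetical):

```python
from backend.agents.dynamic_team_types import (
    AnalystPersona, AnalystConfig, DynamicAnalystInstance, DynamicTeamState,
)

state = DynamicTeamState(run_id="demo_run")
persona = AnalystPersona(
    name="Options Strategist",
    focus=["options pricing", "volatility trading"],
    description="Covers derivatives and volatility structure.",
)
# Registering twice returns False, so type IDs stay unique.
assert state.register_type("options_strategist", persona)

config = AnalystConfig(persona=persona, tags=["options"])
state.add_instance(DynamicAnalystInstance(agent_id="options_1", config=config))
assert state.get_active_instances()[0].agent_id == "options_1"

# Round-trip through the serialization helpers:
restored = DynamicTeamState.from_dict(state.to_dict())
assert "options_strategist" in restored.registered_types
```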
@@ -14,6 +14,14 @@ from backend.agents.agent_workspace import load_agent_workspace_config

from backend.agents.skills_manager import SkillsManager
from backend.agents.skill_metadata import parse_skill_metadata
from backend.config.bootstrap_config import get_bootstrap_config_for_run
+from backend.tools.dynamic_team_tools import (
+    create_analyst,
+    clone_analyst,
+    remove_analyst,
+    list_analyst_types,
+    get_analyst_info,
+    get_team_summary,
+)


def load_agent_profiles() -> Dict[str, Dict[str, Any]]:

@@ -138,6 +146,23 @@ def _register_portfolio_tool_groups(toolkit: Any, pm_agent: Any) -> None:

        group_name="portfolio_ops",
    )

+    # Register dynamic team management tools
+    toolkit.create_tool_group(
+        group_name="dynamic_team",
+        description="Dynamic analyst team management tools.",
+        active=False,
+        notes=(
+            "Use these tools to create, clone, and manage analyst agents dynamically. "
+            "Only available when allow_dynamic_team_update is enabled."
+        ),
+    )
+    toolkit.register_tool_function(create_analyst, group_name="dynamic_team")
+    toolkit.register_tool_function(clone_analyst, group_name="dynamic_team")
+    toolkit.register_tool_function(remove_analyst, group_name="dynamic_team")
+    toolkit.register_tool_function(list_analyst_types, group_name="dynamic_team")
+    toolkit.register_tool_function(get_analyst_info, group_name="dynamic_team")
+    toolkit.register_tool_function(get_team_summary, group_name="dynamic_team")


def _register_risk_tool_groups(toolkit: Any) -> None:
    """Register risk tool groups."""
@@ -312,12 +312,21 @@ class RunWorkspaceManager:

            "- Review the analyses to understand market views\n"
            "- Consider risk warnings before making decisions\n"
            "- Assess current portfolio positions, cash, and margin usage\n"
+            "- Before making a final decision, judge whether the current team is sufficient to cover the task; if coverage is insufficient, do not force a conclusion — expand the team first\n"
+            "- When the existing team's coverage is insufficient, opinions diverge too widely, or a new specialist analysis need appears, prefer dynamically creating a suitable analyst before continuing the discussion\n"
            "- Decisions must be consistent with the overall investment objectives and risk constraints\n\n"
+            "Dynamic team-expansion triggers:\n"
+            "- A research area the current team does not cover appears: options, macro, sector specialties, event-driven, regulatory shocks, crypto assets, commodity chains, special market structures\n"
+            "- The conclusion for a key ticker depends on specialist knowledge for which no existing analyst can provide a direct evidence chain\n"
+            "- Analysts are in clear conflict and the risk manager alone cannot adjudicate\n"
+            "- You need a second analyst of the same type but a different style to validate a high-risk hypothesis\n\n"
            "Decision types:\n"
            '- `long`: bullish, recommend buying\n'
            '- `short`: bearish, recommend selling or shorting\n'
            '- `hold`: neutral, keep current positions\n\n'
            "Output requirements:\n"
+            "- When an expansion trigger fires, you must first create the analyst with the dynamic team tools and absorb its analysis input before continuing with the decision\n"
+            "- Verbally claiming \"more analysis is needed\" without actually calling the creation tool is not allowed\n"
            "- Use the `make_decision` tool to record the final decision for each ticker\n"
            "- After recording, give a summary of the investment rationale\n"
            "- The final summary must be written in Simplified Chinese\n"

@@ -327,6 +336,10 @@ class RunWorkspaceManager:

            "- Consider available cash when sizing; do not exceed what cash allows\n"
            "- Account for margin requirements on short positions\n"
            "- Keep position sizes conservative relative to total portfolio assets\n"
+            "- When a task touches areas the current team does not cover (options, macro, sector specialties, event-driven, crypto assets, etc.), prefer creating or cloning a matching analyst instead of forcing low-quality conclusions out of the existing team\n"
+            "- When analysts remain in high conflict over time with no adjudicating information, consider adding an analyst with a complementary perspective\n"
+            "- If you have identified a coverage gap but have not called the dynamic team tools to fill it, you should not output a high-confidence trading decision\n"
+            "- Output from newly created analysts must be incorporated into this round's decision basis; do not create them and then ignore them\n"
            "- Always provide clear reasoning for decisions\n"
            "- Do not output investment reports or conclusions in English\n"
        )
@@ -13,6 +13,7 @@ from .workspaces import router as workspaces_router

from .guard import router as guard_router
from .runtime import router as runtime_router
from .runs import router as runs_router
+from .dynamic_team import router as dynamic_team_router

__all__ = [
    "agents_router",

@@ -20,4 +21,5 @@ __all__ = [

    "guard_router",
    "runtime_router",
    "runs_router",
+    "dynamic_team_router",
]
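The diff does not show which service app mounts the new router; as a sketch (the mounting point is an assumption, not confirmed by this diff), wiring it into a FastAPI app looks like:

```python
from fastapi import FastAPI

from backend.api import dynamic_team_router

app = FastAPI()
# The "/api/dynamic-team" prefix is declared on the router itself
# (see backend/api/dynamic_team.py below), so no extra prefix is needed here.
app.include_router(dynamic_team_router)
```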
@@ -1,46 +1,18 @@
# -*- coding: utf-8 -*-
-"""
-Agent API Routes
-
-Provides REST API endpoints for both:
-
-- design-time agent management under `workspaces/`
-- run-scoped agent asset access under `runs/<run_id>/`
-"""
+"""Agent API routes for design-time workspace registry CRUD only."""
import logging
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional

-from fastapi import APIRouter, HTTPException, Depends, Body, UploadFile, File, Form
+from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel, Field

from backend.agents import AgentFactory, get_registry
from backend.agents.workspace_manager import RunWorkspaceManager
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skills_manager import SkillsManager
from backend.agents.toolkit_factory import load_agent_profiles
from backend.config.bootstrap_config import get_bootstrap_config_for_run
from backend.llm.models import get_agent_model_info

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/workspaces/{workspace_id}/agents", tags=["agents"])
DESIGN_SCOPE = "design_workspace"
RUNTIME_SCOPE = "runtime_run"
RUNTIME_SCOPE_NOTE = (
    "For profile, skills, and editable agent files, `workspace_id` is treated "
    "as the active run id under `runs/<run_id>/`, not as the design-time "
    "`workspaces/` registry."
)


def _runtime_scope_fields() -> dict[str, str]:
    return {
        "scope_type": RUNTIME_SCOPE,
        "scope_note": RUNTIME_SCOPE_NOTE,
    }


def _design_scope_fields() -> dict[str, str]:

@@ -65,26 +37,9 @@ class CreateAgentRequest(BaseModel):


class UpdateAgentRequest(BaseModel):
-    """Request to update an agent."""
+    """Request to update design-time agent metadata."""
    name: Optional[str] = None
    description: Optional[str] = None
    enabled_skills: Optional[List[str]] = None
    disabled_skills: Optional[List[str]] = None


class InstallExternalSkillRequest(BaseModel):
    """Request to install an external skill for one agent."""
    source: str = Field(..., description="Directory path, zip path, or http(s) zip URL")
    name: Optional[str] = Field(None, description="Optional override skill name")
    activate: bool = Field(True, description="Whether to enable skill immediately")


class LocalSkillRequest(BaseModel):
    skill_name: str = Field(..., description="Local skill name")


class LocalSkillContentRequest(BaseModel):
    content: str = Field(..., description="Updated SKILL.md content")


class AgentResponse(BaseModel):

@@ -99,54 +54,12 @@ class AgentResponse(BaseModel):

    scope_note: Optional[str] = None


class AgentFileResponse(BaseModel):
    """Agent file content response."""
    filename: str
    content: str
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


class AgentProfileResponse(BaseModel):
    agent_id: str
    workspace_id: str
    profile: Dict[str, Any]
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


class AgentSkillsResponse(BaseModel):
    agent_id: str
    workspace_id: str
    skills: List[Dict[str, Any]]
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


class SkillDetailResponse(BaseModel):
    agent_id: str
    workspace_id: str
    skill: Dict[str, Any]
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


# Dependencies
def get_agent_factory():
    """Get AgentFactory instance."""
    return AgentFactory()


def get_workspace_manager():
    """Get run-scoped asset manager for one runtime workspace/run id."""
    return RunWorkspaceManager()


def get_skills_manager():
    """Get SkillsManager instance."""
    return SkillsManager()


# Routes
@router.post("", response_model=AgentResponse)
async def create_agent(

@@ -270,119 +183,6 @@ async def get_agent(

    )


@router.get("/{agent_id}/profile", response_model=AgentProfileResponse)
async def get_agent_profile(
    workspace_id: str,
    agent_id: str,
    skills_manager: SkillsManager = Depends(get_skills_manager),
):
    asset_dir = skills_manager.get_agent_asset_dir(workspace_id, agent_id)
    agent_config = load_agent_workspace_config(asset_dir / "agent.yaml")
    profiles = load_agent_profiles()
    profile = profiles.get(agent_id, {})
    bootstrap = get_bootstrap_config_for_run(skills_manager.project_root, workspace_id)
    override = bootstrap.agent_override(agent_id)
    active_tool_groups = override.get("active_tool_groups", agent_config.active_tool_groups or profile.get("active_tool_groups", []))
    if not isinstance(active_tool_groups, list):
        active_tool_groups = []
    disabled_tool_groups = agent_config.disabled_tool_groups
    if disabled_tool_groups:
        disabled_set = set(disabled_tool_groups)
        active_tool_groups = [group_name for group_name in active_tool_groups if group_name not in disabled_set]

    default_skills = profile.get("skills", [])
    if not isinstance(default_skills, list):
        default_skills = []
    resolved_skills = skills_manager.resolve_agent_skill_names(
        config_name=workspace_id,
        agent_id=agent_id,
        default_skills=default_skills,
    )
    prompt_files = agent_config.prompt_files or ["SOUL.md", "PROFILE.md", "AGENTS.md", "POLICY.md", "MEMORY.md"]
    model_name, model_provider = get_agent_model_info(agent_id)

    return AgentProfileResponse(
        agent_id=agent_id,
        workspace_id=workspace_id,
        profile={
            "model_name": model_name,
            "model_provider": model_provider,
            "prompt_files": prompt_files,
            "default_skills": default_skills,
            "resolved_skills": resolved_skills,
            "active_tool_groups": active_tool_groups,
            "disabled_tool_groups": disabled_tool_groups,
            "enabled_skills": agent_config.enabled_skills,
            "disabled_skills": agent_config.disabled_skills,
        },
        **_runtime_scope_fields(),
    )


@router.get("/{agent_id}/skills", response_model=AgentSkillsResponse)
async def get_agent_skills(
    workspace_id: str,
    agent_id: str,
    skills_manager: SkillsManager = Depends(get_skills_manager),
):
    agent_asset_dir = skills_manager.get_agent_asset_dir(workspace_id, agent_id)
    agent_config = load_agent_workspace_config(agent_asset_dir / "agent.yaml")
    resolved_skills = set(skills_manager.resolve_agent_skill_names(config_name=workspace_id, agent_id=agent_id, default_skills=[]))
    enabled = set(agent_config.enabled_skills)
    disabled = set(agent_config.disabled_skills)

    payload = []
    for item in skills_manager.list_agent_skill_catalog(workspace_id, agent_id):
        if item.skill_name in disabled:
            status = "disabled"
        elif item.skill_name in enabled:
            status = "enabled"
        elif item.skill_name in resolved_skills:
            status = "active"
        else:
            status = "available"
        payload.append({
            "skill_name": item.skill_name,
            "name": item.name,
            "description": item.description,
            "version": item.version,
            "source": item.source,
            "tools": item.tools,
            "status": status,
        })

    return AgentSkillsResponse(
        agent_id=agent_id,
        workspace_id=workspace_id,
        skills=payload,
        **_runtime_scope_fields(),
    )


@router.get("/{agent_id}/skills/{skill_name}", response_model=SkillDetailResponse)
async def get_agent_skill_detail(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    skills_manager: SkillsManager = Depends(get_skills_manager),
):
    try:
        detail = skills_manager.load_agent_skill_document(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=skill_name,
        )
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"Unknown skill: {skill_name}")

    return SkillDetailResponse(
        agent_id=agent_id,
        workspace_id=workspace_id,
        skill=detail,
        **_runtime_scope_fields(),
    )


@router.delete("/{agent_id}")
async def delete_agent(
    workspace_id: str,

@@ -448,16 +248,6 @@ async def update_agent(

    if metadata_updates:
        registry.update_metadata(agent_id, metadata_updates)

    # Update skills if provided
    if request.enabled_skills or request.disabled_skills:
        skills_manager = SkillsManager()
        skills_manager.update_agent_skill_overrides(
            config_name=workspace_id,
            agent_id=agent_id,
            enable=request.enabled_skills or [],
            disable=request.disabled_skills or [],
        )

    # Get updated info
    agent_info = registry.get(agent_id)
    return AgentResponse(

@@ -469,301 +259,3 @@ async def update_agent(

        status=agent_info.status,
        **_design_scope_fields(),
    )


@router.post("/{agent_id}/skills/{skill_name}/enable")
async def enable_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    registry = Depends(get_registry),
):
    """
    Enable a skill for an agent.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        skill_name: Skill name to enable

    Returns:
        Success message
    """
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    result = skills_manager.update_agent_skill_overrides(
        config_name=workspace_id,
        agent_id=agent_id,
        enable=[skill_name],
    )

    return {
        "message": f"Skill '{skill_name}' enabled for agent '{agent_id}'",
        "enabled_skills": result["enabled_skills"],
    }


@router.post("/{agent_id}/skills/{skill_name}/disable")
async def disable_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    registry = Depends(get_registry),
):
    """
    Disable a skill for an agent.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        skill_name: Skill name to disable

    Returns:
        Success message
    """
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    result = skills_manager.update_agent_skill_overrides(
        config_name=workspace_id,
        agent_id=agent_id,
        disable=[skill_name],
    )

    return {
        "message": f"Skill '{skill_name}' disabled for agent '{agent_id}'",
        "disabled_skills": result["disabled_skills"],
    }


@router.post("/{agent_id}/skills/install")
async def install_external_skill(
    workspace_id: str,
    agent_id: str,
    request: InstallExternalSkillRequest,
    registry=Depends(get_registry),
):
    """Install an external skill into one agent's local skills."""
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        result = skills_manager.install_external_skill_for_agent(
            config_name=workspace_id,
            agent_id=agent_id,
            source=request.source,
            skill_name=request.name,
            activate=request.activate,
        )
    except (FileNotFoundError, ValueError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {
        "message": f"Installed external skill '{result['skill_name']}' for '{agent_id}'",
        **result,
    }


@router.post("/{agent_id}/skills/local")
async def create_local_skill(
    workspace_id: str,
    agent_id: str,
    request: LocalSkillRequest,
    registry=Depends(get_registry),
):
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        skills_manager.create_agent_local_skill(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=request.skill_name,
        )
    except (ValueError, FileExistsError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {"message": f"Created local skill '{request.skill_name}' for '{agent_id}'"}


@router.put("/{agent_id}/skills/local/{skill_name}")
async def update_local_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    request: LocalSkillContentRequest,
    registry=Depends(get_registry),
):
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        skills_manager.update_agent_local_skill(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=skill_name,
            content=request.content,
        )
    except (ValueError, FileNotFoundError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {"message": f"Updated local skill '{skill_name}' for '{agent_id}'"}


@router.delete("/{agent_id}/skills/local/{skill_name}")
async def delete_local_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    registry=Depends(get_registry),
):
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        skills_manager.delete_agent_local_skill(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=skill_name,
        )
        skills_manager.forget_agent_skill_overrides(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_names=[skill_name],
        )
    except (ValueError, FileNotFoundError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {"message": f"Deleted local skill '{skill_name}' for '{agent_id}'"}


@router.post("/{agent_id}/skills/upload")
async def upload_external_skill(
    workspace_id: str,
    agent_id: str,
    file: UploadFile = File(...),
    name: Optional[str] = Form(None),
    activate: bool = Form(True),
    registry=Depends(get_registry),
):
    """Upload a zip skill package from frontend and install for one agent."""
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    original_name = (file.filename or "").strip()
    if not original_name.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Uploaded file must be a .zip archive")

    suffix = Path(original_name).suffix or ".zip"
    temp_path: Optional[str] = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            temp_path = tmp.name
            content = await file.read()
            tmp.write(content)

        skills_manager = SkillsManager()
        result = skills_manager.install_external_skill_for_agent(
            config_name=workspace_id,
            agent_id=agent_id,
            source=temp_path,
            skill_name=name,
            activate=activate,
        )
    except (FileNotFoundError, ValueError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    finally:
        try:
            await file.close()
        except Exception as e:
            logger.warning(f"Failed to close uploaded file: {e}")
        if temp_path and os.path.exists(temp_path):
            os.remove(temp_path)

    return {
        "message": f"Uploaded and installed external skill '{result['skill_name']}' for '{agent_id}'",
        **result,
    }


@router.get("/{agent_id}/files/{filename}", response_model=AgentFileResponse)
async def get_agent_file(
    workspace_id: str,
    agent_id: str,
    filename: str,
    workspace_manager: RunWorkspaceManager = Depends(get_workspace_manager),
):
    """
    Read an agent file from the run-scoped asset tree under `runs/<run_id>/`.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        filename: File to read (e.g., SOUL.md, PROFILE.md)

    Returns:
        File content
    """
    try:
        content = workspace_manager.load_agent_file(
            config_name=workspace_id,
            agent_id=agent_id,
            filename=filename,
        )
        return AgentFileResponse(
            filename=filename,
            content=content,
            **_runtime_scope_fields(),
        )
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"File '{filename}' not found")


@router.put("/{agent_id}/files/{filename}", response_model=AgentFileResponse)
async def update_agent_file(
    workspace_id: str,
    agent_id: str,
    filename: str,
    content: str = Body(..., media_type="text/plain"),
    workspace_manager: RunWorkspaceManager = Depends(get_workspace_manager),
):
    """
    Update an agent file in the run-scoped asset tree under `runs/<run_id>/`.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        filename: File to update
        content: New file content

    Returns:
        Updated file information
    """
    try:
        workspace_manager.update_agent_file(
            config_name=workspace_id,
            agent_id=agent_id,
            filename=filename,
            content=content,
        )
        return AgentFileResponse(
            filename=filename,
            content=content,
            **_runtime_scope_fields(),
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
404 changes: backend/api/dynamic_team.py (new file)

@@ -0,0 +1,404 @@
# -*- coding: utf-8 -*-
"""Dynamic Team API - REST endpoints for managing analyst team dynamically.

This module provides API endpoints for:
- Creating new analysts with custom configuration
- Cloning existing analysts
- Removing analysts
- Listing available analyst types
- Getting analyst information
- Managing team composition

These endpoints allow both the PM agent (via tool calls) and frontend
(via HTTP) to manage the analyst team dynamically.
"""
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field

from backend.agents.dynamic_team_types import (
    AnalystPersona,
    AnalystConfig,
    AnalystTypeInfo,
)
from backend.config.constants import ANALYST_TYPES
from backend.agents.prompt_loader import get_prompt_loader

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/dynamic-team", tags=["dynamic-team"])

PROJECT_ROOT = Path(__file__).resolve().parents[2]


# Pydantic models for API requests/responses

class AnalystPersonaRequest(BaseModel):
    """Request model for analyst persona definition."""
    name: str = Field(..., description="Display name for the analyst")
    focus: List[str] = Field(default_factory=list, description="List of focus areas")
    description: str = Field(..., description="Detailed description")
    preferred_tools: Optional[List[str]] = Field(None, description="Preferred tool categories")
    icon: Optional[str] = Field(None, description="Icon identifier")


class CreateAnalystRequest(BaseModel):
    """Request model for creating a new analyst."""
    agent_id: str = Field(..., description="Unique identifier for the new analyst")
    analyst_type: str = Field(..., description="Base type or custom identifier")
    persona: Optional[AnalystPersonaRequest] = Field(None, description="Custom persona definition")
    soul_md: Optional[str] = Field(None, description="Custom SOUL.md content")
    agents_md: Optional[str] = Field(None, description="Custom AGENTS.md content")
    profile_md: Optional[str] = Field(None, description="Custom PROFILE.md content")
    bootstrap_md: Optional[str] = Field(None, description="Custom BOOTSTRAP.md content")
    model_name: Optional[str] = Field(None, description="Override default LLM model")
    skills: Optional[List[str]] = Field(None, description="List of skill IDs to enable")
    tags: Optional[List[str]] = Field(None, description="Classification tags")


class CloneAnalystRequest(BaseModel):
    """Request model for cloning an analyst."""
    source_id: str = Field(..., description="ID of the analyst to clone")
    new_id: str = Field(..., description="Unique identifier for the new analyst")
    name: Optional[str] = Field(None, description="New display name")
    focus_additions: Optional[List[str]] = Field(None, description="Additional focus areas")
    description_override: Optional[str] = Field(None, description="New description")
    model_name: Optional[str] = Field(None, description="Override model from source")


class RegisterTypeRequest(BaseModel):
    """Request model for registering a new analyst type."""
    type_id: str = Field(..., description="Unique identifier for this type")
    name: str = Field(..., description="Display name")
    focus: List[str] = Field(..., description="List of focus areas")
    description: str = Field(..., description="Detailed description")
    preferred_tools: Optional[List[str]] = Field(None, description="Preferred tool categories")


class AnalystResponse(BaseModel):
    """Response model for analyst operations."""
    success: bool
    agent_id: Optional[str] = None
    message: str
    error: Optional[str] = None


class AnalystTypeResponse(BaseModel):
    """Response model for analyst type information."""
    type_id: str
    name: str
    description: str
    is_builtin: bool
    source: str


class AnalystInfoResponse(BaseModel):
    """Response model for detailed analyst information."""
    found: bool
    agent_id: str
    config: Optional[Dict[str, Any]] = None
    is_custom: bool = False
    is_clone: bool = False
    parent_id: Optional[str] = None
    message: Optional[str] = None


class TeamSummaryResponse(BaseModel):
    """Response model for team summary."""
    total_analysts: int
    custom_analysts: int
    cloned_analysts: int
    analysts: List[Dict[str, Any]]
    registered_types: int


# Helper function to get the current pipeline instance

def _get_pipeline(run_id: str) -> Optional[Any]:
    """Get the TradingPipeline instance for a run.

    Args:
        run_id: The run configuration ID

    Returns:
        TradingPipeline instance or None if not found
    """
    # Import here to avoid circular imports
    try:
        from backend.apps.runtime_service import get_runtime_state
        runtime_state = get_runtime_state()
        if runtime_state and hasattr(runtime_state, 'pipeline'):
            return runtime_state.pipeline
    except Exception as e:
        logger.warning(f"Could not get pipeline for run {run_id}: {e}")
    return None


def _get_controller(run_id: str) -> Optional[Any]:
    """Get the DynamicTeamController for a run.

    Args:
        run_id: The run configuration ID

    Returns:
        DynamicTeamController instance or None if not available
    """
    try:
        from backend.tools.dynamic_team_tools import get_controller
        return get_controller()
    except Exception as e:
        logger.warning(f"Could not get controller for run {run_id}: {e}")
        return None


# API Endpoints

@router.get("/types", response_model=List[AnalystTypeResponse])
async def list_analyst_types() -> List[AnalystTypeResponse]:
    """List all available analyst types.

    Returns both built-in types (from ANALYST_TYPES) and runtime-registered types.
    """
    result = []

    # Add built-in types
    for type_id, info in ANALYST_TYPES.items():
        result.append(AnalystTypeResponse(
            type_id=type_id,
            name=info.get("display_name", type_id),
            description=info.get("description", ""),
            is_builtin=True,
            source="constants",
        ))

    # Try to get runtime registered types
    controller = _get_controller("default")
    if controller:
        for type_id, persona in controller._registered_types.items():
            result.append(AnalystTypeResponse(
                type_id=type_id,
                name=persona.name,
                description=persona.description,
                is_builtin=False,
                source="runtime",
            ))

    return result
|
||||
|
||||
@router.get("/personas")
|
||||
async def get_personas() -> Dict[str, Any]:
|
||||
"""Get all analyst personas from personas.yaml.
|
||||
|
||||
Returns the persona definitions used for analyst initialization.
|
||||
"""
|
||||
try:
|
||||
personas = get_prompt_loader().load_yaml_config("analyst", "personas")
|
||||
return {"success": True, "personas": personas}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load personas: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Failed to load personas: {e}")
|
||||
|
||||
|
||||
@router.post("/runs/{run_id}/analysts", response_model=AnalystResponse)
|
||||
async def create_analyst(
|
||||
run_id: str,
|
||||
request: CreateAnalystRequest,
|
||||
) -> AnalystResponse:
|
||||
"""Create a new analyst in the specified run.
|
||||
|
||||
Args:
|
||||
run_id: The run configuration ID
|
||||
request: Analyst creation configuration
|
||||
|
||||
Returns:
|
||||
Result of the creation operation
|
||||
"""
|
||||
controller = _get_controller(run_id)
|
||||
if not controller:
|
||||
raise HTTPException(
|
||||
status_code=503,
|
||||
detail="Dynamic team controller not available. Is the pipeline running?"
|
||||
)
|
||||
|
||||
# Build persona if provided
|
||||
persona = None
|
||||
if request.persona:
|
||||
persona = AnalystPersona(
|
||||
name=request.persona.name,
|
||||
focus=request.persona.focus,
|
||||
description=request.persona.description,
|
||||
preferred_tools=request.persona.preferred_tools,
|
||||
icon=request.persona.icon,
|
||||
)
|
||||
|
||||
# Build config
|
||||
config = AnalystConfig(
|
||||
persona=persona,
|
||||
analyst_type=request.analyst_type if request.analyst_type in ANALYST_TYPES else None,
|
||||
soul_md=request.soul_md,
|
||||
agents_md=request.agents_md,
|
||||
profile_md=request.profile_md,
|
||||
bootstrap_md=request.bootstrap_md,
|
||||
model_name=request.model_name,
|
||||
skills=request.skills or [],
|
||||
tags=request.tags or [],
|
||||
)
|
||||
|
||||
# Create the analyst
|
||||
result = controller.create_analyst(
|
||||
agent_id=request.agent_id,
|
||||
analyst_type=request.analyst_type,
|
||||
name=persona.name if persona else None,
|
||||
focus=persona.focus if persona else None,
|
||||
description=persona.description if persona else None,
|
||||
soul_md=config.soul_md,
|
||||
agents_md=config.agents_md,
|
||||
model_name=config.model_name,
|
||||
)
|
||||
|
||||
return AnalystResponse(**result)
|
||||
|
||||
|
||||
@router.post("/runs/{run_id}/analysts/clone", response_model=AnalystResponse)
|
||||
async def clone_analyst(
|
||||
run_id: str,
|
||||
request: CloneAnalystRequest,
|
||||
) -> AnalystResponse:
|
||||
"""Clone an existing analyst.
|
||||
|
||||
Args:
|
||||
run_id: The run configuration ID
|
||||
request: Clone configuration
|
||||
|
||||
Returns:
|
||||
Result of the clone operation
|
||||
"""
|
||||
controller = _get_controller(run_id)
|
||||
if not controller:
|
||||
raise HTTPException(
|
||||
status_code=503,
|
||||
detail="Dynamic team controller not available. Is the pipeline running?"
|
||||
)
|
||||
|
||||
result = controller.clone_analyst(
|
||||
source_id=request.source_id,
|
||||
new_id=request.new_id,
|
||||
name=request.name,
|
||||
focus_additions=request.focus_additions,
|
||||
description_override=request.description_override,
|
||||
model_name=request.model_name,
|
||||
)
|
||||
|
||||
return AnalystResponse(**result)
|
||||
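As a quick sanity check, the clone endpoint can be exercised with a plain HTTP client. A minimal sketch, assuming the backend listens on `http://localhost:8000` and that this router is mounted without an extra path prefix (both assumptions; adjust to your deployment):

```python
import requests  # assumption: requests is installed in your environment

BASE = "http://localhost:8000"  # hypothetical host/port
payload = {
    "source_id": "technical_analyst",      # must be an existing analyst ID
    "new_id": "technical_analyst_crypto",  # new unique ID
    "name": "Crypto Technical Analyst",
    "focus_additions": ["crypto market structure"],
}
resp = requests.post(f"{BASE}/runs/demo_run/analysts/clone", json=payload, timeout=30)
resp.raise_for_status()
print(resp.json())  # AnalystResponse shape: success, agent_id, message, error
```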
@router.delete("/runs/{run_id}/analysts/{agent_id}", response_model=AnalystResponse)
async def remove_analyst(run_id: str, agent_id: str) -> AnalystResponse:
    """Remove a dynamically created analyst.

    Args:
        run_id: The run configuration ID
        agent_id: The analyst to remove

    Returns:
        Result of the removal operation
    """
    controller = _get_controller(run_id)
    if not controller:
        raise HTTPException(
            status_code=503,
            detail="Dynamic team controller not available. Is the pipeline running?"
        )

    result = controller.remove_analyst(agent_id)
    return AnalystResponse(**result)


@router.get("/runs/{run_id}/analysts/{agent_id}", response_model=AnalystInfoResponse)
async def get_analyst_info(run_id: str, agent_id: str) -> AnalystInfoResponse:
    """Get information about a specific analyst.

    Args:
        run_id: The run configuration ID
        agent_id: The analyst ID

    Returns:
        Analyst configuration and status
    """
    controller = _get_controller(run_id)
    if not controller:
        raise HTTPException(
            status_code=503,
            detail="Dynamic team controller not available. Is the pipeline running?"
        )

    result = controller.get_analyst_info(agent_id)
    return AnalystInfoResponse(**result)


@router.get("/runs/{run_id}/summary", response_model=TeamSummaryResponse)
async def get_team_summary(run_id: str) -> TeamSummaryResponse:
    """Get a summary of the current analyst team.

    Args:
        run_id: The run configuration ID

    Returns:
        Team composition information
    """
    controller = _get_controller(run_id)
    if not controller:
        raise HTTPException(
            status_code=503,
            detail="Dynamic team controller not available. Is the pipeline running?"
        )

    result = controller.get_team_summary()
    return TeamSummaryResponse(**result)


@router.post("/runs/{run_id}/types", response_model=AnalystTypeResponse)
async def register_analyst_type(
    run_id: str,
    request: RegisterTypeRequest,
) -> AnalystTypeResponse:
    """Register a new analyst type.

    Args:
        run_id: The run configuration ID
        request: Type registration configuration

    Returns:
        Registered type information
    """
    controller = _get_controller(run_id)
    if not controller:
        raise HTTPException(
            status_code=503,
            detail="Dynamic team controller not available. Is the pipeline running?"
        )

    result = controller.register_analyst_type(
        type_id=request.type_id,
        name=request.name,
        focus=request.focus,
        description=request.description,
        preferred_tools=request.preferred_tools,
    )

    if not result.get("success", False):
        raise HTTPException(status_code=400, detail=result.get("message", "Registration failed"))

    return AnalystTypeResponse(
        type_id=request.type_id,
        name=request.name,
        description=request.description,
        is_builtin=False,
        source="runtime",
    )

@@ -7,7 +7,7 @@ Provides REST API endpoints for tool guard operations.
from __future__ import annotations

from typing import Any, Dict, List, Optional
from datetime import UTC, datetime
from datetime import datetime, timezone

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
@@ -146,7 +146,7 @@ async def check_tool_call(

    if request.tool_name in SAFE_TOOLS:
        record.status = ApprovalStatus.APPROVED
        record.resolved_at = datetime.now(UTC)
        record.resolved_at = datetime.now(timezone.utc)
        record.resolved_by = "system"
        STORE.set_status(
            record.approval_id,
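The import swap above reads as a compatibility fix: `datetime.UTC` only exists on Python 3.11+, while `datetime.timezone.utc` works on every supported 3.x. A minimal sketch of the equivalence (my reading of the change, not text from the diff):

```python
from datetime import datetime, timezone

# Works on all modern Python versions:
now = datetime.now(timezone.utc)

# Equivalent, but Python 3.11+ only:
# from datetime import UTC
# now = datetime.now(UTC)
assert now.tzinfo is timezone.utc
```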
@@ -6,7 +6,7 @@ Provides REST API endpoints for runtime agent asset access under `runs/<run_id>/

This module separates runtime concerns from design-time workspace management:
- `/api/runs/{run_id}/agents/*` - Runtime agent assets and configuration
- `/api/workspaces/{workspace_id}/agents/*` - Design-time workspace registry (deprecated)
- design-time workspace registry CRUD lives under `/api/workspaces/{workspace_id}/...`
"""
import logging
import os

@@ -7,6 +7,7 @@ import asyncio
import json
import logging
import os
import re
import shutil
import subprocess
import sys
@@ -29,6 +30,17 @@ router = APIRouter(prefix="/api/runtime", tags=["runtime"])
PROJECT_ROOT = Path(__file__).resolve().parents[2]


def _normalize_schedule_mode(value: Any) -> str:
    """Normalize schedule mode to the current public vocabulary.

    `intraday` is kept as a backward-compatible alias for `interval`.
    """
    mode = str(value or "daily").strip().lower()
    if mode == "intraday":
        return "interval"
    return mode or "daily"
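A few concrete input/output pairs for `_normalize_schedule_mode`, as implied by the implementation above (a sketch, not part of the diff):

```python
assert _normalize_schedule_mode("daily") == "daily"
assert _normalize_schedule_mode(" Intraday ") == "interval"  # legacy alias survives
assert _normalize_schedule_mode("interval") == "interval"
assert _normalize_schedule_mode(None) == "daily"             # falsy values default
assert _normalize_schedule_mode("") == "daily"
```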
class RuntimeState:
    """Thread-safe singleton for managing runtime state.

@@ -143,6 +155,7 @@ class RunContextResponse(BaseModel):

class RuntimeAgentState(BaseModel):
    agent_id: str
    display_name: Optional[str] = None
    status: str
    last_session: Optional[str] = None
    last_updated: str
@@ -289,6 +302,70 @@ def _load_run_server_state(run_dir: Path) -> Dict[str, Any]:
    return {}


def _resolve_runtime_agent_display_name(run_id: str, agent_id: str) -> Optional[str]:
    """Best-effort display name for one runtime agent.

    Priority:
    1. A PROFILE.md body line like `角色定位:<Chinese display name>`
    2. PROFILE.md YAML frontmatter field `name`
    """
    asset_dir = PROJECT_ROOT / "runs" / run_id / "agents" / agent_id
    profile_path = asset_dir / "PROFILE.md"
    if not profile_path.exists():
        return None

    try:
        raw = profile_path.read_text(encoding="utf-8").strip()
    except Exception:
        return None

    if not raw:
        return None

    frontmatter_name: Optional[str] = None
    if raw.startswith("---"):
        parts = raw.split("---", 2)
        if len(parts) >= 3:
            try:
                import yaml

                parsed = yaml.safe_load(parts[1].strip()) or {}
                if isinstance(parsed, dict):
                    value = parsed.get("name")
                    if isinstance(value, str) and value.strip():
                        frontmatter_name = value.strip()
            except Exception:
                pass
            raw = parts[2].strip()

    for line in raw.splitlines():
        normalized = line.strip()
        if normalized.startswith("角色定位:"):
            value = normalized.split(":", 1)[1].strip()
            if value:
                return value
        if normalized.lower().startswith("role:"):
            value = normalized.split(":", 1)[1].strip()
            if value:
                return value

    return frontmatter_name
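For reference, both PROFILE.md shapes that `_resolve_runtime_agent_display_name` understands, as a hypothetical fixture (file content invented for illustration):

```python
# Hypothetical runs/<run_id>/agents/technical_analyst/PROFILE.md content.
# The body line wins over the frontmatter `name` when both are present.
PROFILE_FIXTURE = """---
name: Technical Analyst
---
角色定位:技术分析师
"""
# With this file on disk the resolver returns "技术分析师";
# with only the frontmatter it falls back to "Technical Analyst".
```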
def _enrich_runtime_agents(run_id: Optional[str], agents: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    if not run_id:
        return agents

    enriched: List[Dict[str, Any]] = []
    for item in agents:
        payload = dict(item)
        display_name = payload.get("display_name")
        agent_id = str(payload.get("agent_id") or "").strip()
        if agent_id and (not isinstance(display_name, str) or not display_name.strip()):
            payload["display_name"] = _resolve_runtime_agent_display_name(run_id, agent_id)
        enriched.append(payload)
    return enriched


def _extract_history_metrics(run_dir: Path) -> tuple[int, Optional[float]]:
    """Prefer runtime state files over dashboard exports for history summaries."""
    server_state = _load_run_server_state(run_dir)
@@ -439,6 +516,11 @@ def _is_gateway_running() -> bool:

    Checks both the internally-managed gateway process and falls back to
    port availability (for externally-managed gateway processes).

    The fallback matters because this codebase may still encounter two startup
    shapes while historical artifacts remain in-tree:
    1. runtime_service-managed Gateway subprocesses
    2. externally started historical Gateway processes outside the supported dev flow
    """
    process = _runtime_state.gateway_process
    if process is not None and process.poll() is None:
@@ -481,7 +563,11 @@ def _start_gateway_process(
    bootstrap: Dict[str, Any],
    port: int
) -> subprocess.Popen:
    """Start Gateway as a separate process."""
    """Start Gateway as a runtime_service-managed subprocess.

    This path is used when runtime lifecycle is driven through the runtime API.
    It is not the only supported way a Gateway may exist in the current repo.
    """
    # Validate configuration before starting
    validation_errors = _validate_gateway_config(bootstrap)
    if validation_errors:
@@ -546,10 +632,11 @@ def _validate_gateway_config(bootstrap: Dict[str, Any]) -> List[str]:
    # Check LLM configuration
    model_name = os.getenv("MODEL_NAME")
    openai_key = os.getenv("OPENAI_API_KEY")
    dashscope_key = os.getenv("DASHSCOPE_API_KEY")
    if not model_name:
        errors.append("MODEL_NAME environment variable is not set")
    if not openai_key:
        errors.append("OPENAI_API_KEY environment variable is not set")
    if not openai_key and not dashscope_key:
        errors.append("Either OPENAI_API_KEY or DASHSCOPE_API_KEY environment variable must be set")

    # Validate tickers
    tickers = bootstrap.get("tickers", [])
@@ -592,9 +679,9 @@ def _validate_gateway_config(bootstrap: Dict[str, Any]) -> List[str]:
        errors.append("Dates must be in YYYY-MM-DD format")

    # Validate schedule mode
    schedule_mode = bootstrap.get("schedule_mode", "daily")
    if schedule_mode not in ("daily", "intraday"):
        errors.append(f"Invalid schedule_mode '{schedule_mode}': must be 'daily' or 'intraday'")
    schedule_mode = _normalize_schedule_mode(bootstrap.get("schedule_mode", "daily"))
    if schedule_mode not in ("daily", "interval"):
        errors.append(f"Invalid schedule_mode '{schedule_mode}': must be 'daily' or 'interval'")

    return errors

@@ -702,7 +789,8 @@ async def get_run_context() -> RunContextResponse:
async def get_runtime_agents() -> RuntimeAgentsResponse:
    """Return agent states from the active runtime, or latest persisted run."""
    snapshot = _get_active_runtime_snapshot() if _is_gateway_running() else _load_latest_runtime_snapshot()
    agents = snapshot.get("agents", [])
    run_id = snapshot.get("context", {}).get("config_name")
    agents = _enrich_runtime_agents(run_id, snapshot.get("agents", []))

    return RuntimeAgentsResponse(
        agents=[RuntimeAgentState(**a) for a in agents]
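Put together, each enriched entry returned by the agents endpoint carries the `RuntimeAgentState` fields defined earlier. A sketch with invented values:

```python
# Invented example of one enriched agent dict, matching RuntimeAgentState:
agent = {
    "agent_id": "technical_analyst",
    "display_name": "技术分析师",     # filled from PROFILE.md when missing
    "status": "analysis_in_progress",
    "last_session": "2024-01-15",    # hypothetical session-key format
    "last_updated": "2024-01-15T09:30:00",
}
state = RuntimeAgentState(**agent)
```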
@@ -778,7 +866,7 @@ async def get_runtime_mode() -> RuntimeModeResponse:
        mode=mode,
        is_backtest=mode == "backtest",
        run_id=context.get("config_name"),
        schedule_mode=bootstrap.get("schedule_mode"),
        schedule_mode=_normalize_schedule_mode(bootstrap.get("schedule_mode")),
        is_running=True,
    )
except HTTPException:
@@ -849,11 +937,24 @@ def _load_latest_runtime_snapshot() -> Dict[str, Any]:


def _get_active_runtime_snapshot() -> Dict[str, Any]:
    """Return the active runtime snapshot, preferring in-memory manager state."""
    """Return the active runtime snapshot.

    For a running Gateway, the canonical runtime source of truth is the
    run-scoped snapshot file under `runs/<run_id>/state/runtime_state.json`,
    because the Gateway subprocess mutates it directly while the parent
    runtime_service process may still hold a stale in-memory manager snapshot.
    """
    if not _is_gateway_running():
        raise HTTPException(status_code=404, detail="No runtime is currently running")

    manager = _runtime_state.runtime_manager
    if manager is not None:
        run_id = str(getattr(manager, "config_name", "") or "").strip()
        if run_id:
            snapshot_path = _get_run_dir(run_id) / "state" / "runtime_state.json"
            if snapshot_path.exists():
                return json.loads(snapshot_path.read_text(encoding="utf-8"))

    if manager is not None and hasattr(manager, "build_snapshot"):
        snapshot = manager.build_snapshot()
        context = snapshot.get("context") or {}
@@ -880,11 +981,32 @@ def _read_log_tail(path: Path, max_chars: int = 120_000) -> str:
    if not path.exists() or not path.is_file():
        return ""
    text = path.read_text(encoding="utf-8", errors="replace")
    text = _sanitize_runtime_log_text(text)
    if len(text) <= max_chars:
        return text
    return text[-max_chars:]


def _sanitize_runtime_log_text(text: str) -> str:
    if not text:
        return ""

    # Drop repetitive development-only warnings for unsandboxed skill execution.
    text = re.sub(
        r"(?:^|\n)=+\n"
        r"⚠️\s+\[安全警告\]\s+技能在无沙盒模式下运行\s+\(SKILL_SANDBOX_MODE=none\)\n"
        r"\s+技能脚本将直接在当前进程中执行,无隔离保护。\n"
        r"\s+建议:生产环境请设置\s+SKILL_SANDBOX_MODE=docker\n"
        r"=+\n?",
        "\n",
        text,
        flags=re.MULTILINE,
    )

    text = re.sub(r"\n{3,}", "\n\n", text)
    return text.strip()
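To make the sanitizer's effect concrete, a tiny sketch (the banner text mirrors the regex above; the surrounding log lines are invented):

```python
noisy = (
    "pipeline started\n"
    "==========\n"
    "⚠️ [安全警告] 技能在无沙盒模式下运行 (SKILL_SANDBOX_MODE=none)\n"
    "  技能脚本将直接在当前进程中执行,无隔离保护。\n"
    "  建议:生产环境请设置 SKILL_SANDBOX_MODE=docker\n"
    "==========\n"
    "pipeline finished\n"
)
print(_sanitize_runtime_log_text(noisy))
# -> "pipeline started\npipeline finished"  (banner removed)
```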
def _get_current_runtime_context() -> Dict[str, Any]:
    """Return the active runtime context from the latest snapshot."""
    if not _is_gateway_running():
@@ -909,7 +1031,7 @@ def _resolve_runtime_response(run_id: str) -> RuntimeConfigResponse:
        project_root=PROJECT_ROOT,
        config_name=run_id,
        enable_memory=bool(bootstrap.get("enable_memory", False)),
        schedule_mode=str(bootstrap.get("schedule_mode", "daily")),
        schedule_mode=_normalize_schedule_mode(bootstrap.get("schedule_mode", "daily")),
        interval_minutes=int(bootstrap.get("interval_minutes", 60) or 60),
        trigger_time=str(bootstrap.get("trigger_time", "09:30") or "09:30"),
    )
@@ -929,11 +1051,11 @@ def _normalize_runtime_config_updates(
    updates: Dict[str, Any] = {}

    if request.schedule_mode is not None:
        schedule_mode = str(request.schedule_mode).strip().lower()
        if schedule_mode not in {"daily", "intraday"}:
        schedule_mode = _normalize_schedule_mode(request.schedule_mode)
        if schedule_mode not in {"daily", "interval"}:
            raise HTTPException(
                status_code=400,
                detail="schedule_mode must be 'daily' or 'intraday'",
                detail="schedule_mode must be 'daily' or 'interval'",
            )
        updates["schedule_mode"] = schedule_mode


@@ -31,8 +31,7 @@ def _build_scope_payload(project_root: Path) -> dict[str, object]:
    },
    "agent_route_note": (
        "Runtime routes use `/api/runs/{run_id}/agents/...`. "
        "Legacy `/api/workspaces/{workspace_id}/agents/...` routes are deprecated "
        "but remain for backward compatibility."
        "Design-time CRUD routes use `/api/workspaces/{workspace_id}/agents/...`."
    ),
}


@@ -81,7 +81,12 @@ async def proxy_ws(ws: WebSocket):
    await ws.accept()
    upstream = None
    try:
        upstream = await websockets.asyncio.client.connect(gateway_url)
        upstream = await websockets.asyncio.client.connect(
            gateway_url,
            ping_interval=20,
            ping_timeout=120,
            max_size=10 * 1024 * 1024,  # 10MB
        )

        async def client_to_upstream():
            try:
@@ -28,11 +28,11 @@ def create_app() -> FastAPI:
    add_cors_middleware(app)

    @app.get("/health")
    async def health_check() -> dict[str, str]:
    def health_check() -> dict[str, str]:
        return {"status": "healthy", "service": "news-service"}

    @app.get("/api/enriched-news")
    async def api_get_enriched_news(
    def api_get_enriched_news(
        ticker: str = Query(..., min_length=1),
        start_date: str | None = Query(None),
        end_date: str | None = Query(None),
@@ -49,7 +49,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/news-for-date")
    async def api_get_news_for_date(
    def api_get_news_for_date(
        ticker: str = Query(..., min_length=1),
        date: str = Query(...),
        limit: int = Query(20, ge=1, le=100),
@@ -64,7 +64,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/news-timeline")
    async def api_get_news_timeline(
    def api_get_news_timeline(
        ticker: str = Query(..., min_length=1),
        start_date: str = Query(...),
        end_date: str = Query(...),
@@ -79,7 +79,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/categories")
    async def api_get_categories(
    def api_get_categories(
        ticker: str = Query(..., min_length=1),
        start_date: str | None = Query(None),
        end_date: str | None = Query(None),
@@ -96,7 +96,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/similar-days")
    async def api_get_similar_days(
    def api_get_similar_days(
        ticker: str = Query(..., min_length=1),
        date: str = Query(...),
        n_similar: int = Query(5, ge=1, le=20),
@@ -111,7 +111,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/stories/{ticker}")
    async def api_get_story(
    def api_get_story(
        ticker: str,
        as_of_date: str = Query(...),
        store: MarketStore = Depends(get_market_store),
@@ -124,7 +124,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/range-explain")
    async def api_get_range_explain(
    def api_get_range_explain(
        ticker: str = Query(..., min_length=1),
        start_date: str = Query(...),
        end_date: str = Query(...),
@@ -5,7 +5,7 @@ from __future__ import annotations

from fastapi import FastAPI

from backend.api import runtime_router
from backend.api import runtime_router, dynamic_team_router
from backend.api.runtime import get_runtime_state, _check_gateway_health, _get_gateway_process_details
from backend.apps.cors import add_cors_middleware

@@ -78,6 +78,7 @@ def create_app() -> FastAPI:
    }

    app.include_router(runtime_router)
    app.include_router(dynamic_team_router)
    return app
@@ -29,12 +29,12 @@ def create_app() -> FastAPI:
    add_cors_middleware(app)

    @app.get("/health")
    async def health_check() -> dict[str, str]:
    def health_check() -> dict[str, str]:
        """Health check endpoint."""
        return {"status": "healthy", "service": "trading-service"}

    @app.get("/api/prices", response_model=PriceResponse)
    async def api_get_prices(
    def api_get_prices(
        ticker: str = Query(..., min_length=1),
        start_date: str = Query(...),
        end_date: str = Query(...),
@@ -47,7 +47,7 @@ def create_app() -> FastAPI:
        return PriceResponse(ticker=payload["ticker"], prices=payload["prices"])

    @app.get("/api/financials", response_model=FinancialMetricsResponse)
    async def api_get_financials(
    def api_get_financials(
        ticker: str = Query(..., min_length=1),
        end_date: str = Query(...),
        period: str = Query("ttm"),
@@ -62,7 +62,7 @@ def create_app() -> FastAPI:
        return FinancialMetricsResponse(financial_metrics=payload["financial_metrics"])

    @app.get("/api/news", response_model=CompanyNewsResponse)
    async def api_get_news(
    def api_get_news(
        ticker: str = Query(..., min_length=1),
        end_date: str = Query(...),
        start_date: str | None = Query(None),
@@ -77,7 +77,7 @@ def create_app() -> FastAPI:
        return CompanyNewsResponse(news=payload["news"])

    @app.get("/api/insider-trades", response_model=InsiderTradeResponse)
    async def api_get_insider_trades(
    def api_get_insider_trades(
        ticker: str = Query(..., min_length=1),
        end_date: str = Query(...),
        start_date: str | None = Query(None),
@@ -92,12 +92,12 @@ def create_app() -> FastAPI:
        return InsiderTradeResponse(insider_trades=payload["insider_trades"])

    @app.get("/api/market/status")
    async def api_get_market_status() -> dict[str, Any]:
    def api_get_market_status() -> dict[str, Any]:
        """Return current market status using the existing market service logic."""
        return trading_domain.get_market_status_payload()

    @app.get("/api/market-cap")
    async def api_get_market_cap(
    def api_get_market_cap(
        ticker: str = Query(..., min_length=1),
        end_date: str = Query(...),
    ) -> dict[str, Any]:
@@ -108,7 +108,7 @@ def create_app() -> FastAPI:
    )

    @app.get("/api/line-items", response_model=LineItemResponse)
    async def api_get_line_items(
    def api_get_line_items(
        ticker: str = Query(..., min_length=1),
        line_items: list[str] = Query(...),
        end_date: str = Query(...),
@@ -27,8 +27,10 @@ valuation_analyst:
portfolio_manager:
  skills:
    - portfolio_decisioning
    - dynamic_team_management
  active_tool_groups:
    - portfolio_ops
    - dynamic_team

risk_manager:
  skills:

@@ -131,6 +131,13 @@ def _coerce_bool(value: Any) -> bool:
    return bool(value)


def _normalize_schedule_mode(value: Any) -> str:
    mode = str(value or "daily").strip().lower()
    if mode == "intraday":
        return "interval"
    return mode or "daily"


def resolve_runtime_config(
    project_root: Path,
    config_name: str,
@@ -162,9 +169,9 @@ def resolve_runtime_config(
            get_env_int("MAX_COMM_CYCLES", 2),
        ),
    ),
    "schedule_mode": str(
    "schedule_mode": _normalize_schedule_mode(
        bootstrap.get("schedule_mode", schedule_mode),
    ).strip().lower() or schedule_mode,
    ),
    "interval_minutes": int(
        bootstrap.get(
            "interval_minutes",
197
backend/core/apo.py
Normal file
@@ -0,0 +1,197 @@
# -*- coding: utf-8 -*-
"""
Autonomous Policy Optimizer (APO)
Automatically tunes agent policies based on performance feedback.
"""

import logging
import json
import os
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

from agentscope.message import Msg
from backend.llm.models import get_agent_model, get_agent_formatter
from backend.agents.workspace_manager import WorkspaceManager

logger = logging.getLogger(__name__)


class PolicyOptimizer:
    """
    PolicyOptimizer analyzes trading performance and automatically updates
    agent workspace files (POLICY.md, AGENTS.md) to improve future results.
    """

    def __init__(self, config_name: str, project_root: Optional[Path] = None):
        self.config_name = config_name
        self.workspace_manager = WorkspaceManager(project_root=project_root)
        # Use a high-capability model for the optimizer (meta-agent)
        self.model = get_agent_model("portfolio_manager")
        self.formatter = get_agent_formatter("portfolio_manager")

    async def run_optimization(
        self,
        date: str,
        reflection_content: str,
        settlement_result: Dict[str, Any],
        analyst_results: List[Dict[str, Any]],
        decisions: Dict[str, Dict],
    ) -> Dict[str, Any]:
        """
        Run the optimization loop if performance indicates a need for change.
        """
        total_pnl = settlement_result.get("portfolio_value", 0) - 100000.0  # Assuming 100k initial
        # You might want to use a more sophisticated trigger, like 3 consecutive losses
        if total_pnl >= 0:
            logger.info(f"APO: Positive P&L (${total_pnl:,.2f}) for {date}, skipping optimization.")
            return {"status": "skipped", "reason": "positive_pnl"}

        logger.info(f"APO: Negative P&L (${total_pnl:,.2f}) detected for {date}. Starting optimization...")

        # 1. Identify underperforming agents or logic
        # 2. Generate policy updates
        # 3. Apply updates

        optimizations = []

        # Focus on agents that gave high confidence but wrong direction
        underperformers = self._identify_underperformers(settlement_result, analyst_results)

        for agent_id in underperformers:
            update = await self._generate_policy_update(
                agent_id,
                date,
                reflection_content,
                settlement_result,
                analyst_results,
                decisions
            )
            if update:
                self._apply_update(agent_id, update)
                optimizations.append({
                    "agent_id": agent_id,
                    "file": update.get("file", "POLICY.md"),
                    "change": update.get("change", "")
                })

        return {
            "status": "completed",
            "date": date,
            "total_pnl": total_pnl,
            "optimizations": optimizations
        }

    def _identify_underperformers(
        self,
        settlement_result: Dict[str, Any],
        analyst_results: List[Dict[str, Any]]
    ) -> List[str]:
        """Identify which agents might need policy adjustments."""
        underperformers = []

        # Simple logic: if the overall day was a loss, all active analysts might need a check,
        # but specifically those whose predictions didn't match the market.
        # For now, let's include all analysts involved in the day.
        for result in analyst_results:
            agent_id = result.get("agent")
            if agent_id:
                underperformers.append(agent_id)

        # Also include PM and Risk Manager as they are critical
        underperformers.append("portfolio_manager")
        underperformers.append("risk_manager")

        return list(set(underperformers))

    async def _generate_policy_update(
        self,
        agent_id: str,
        date: str,
        reflection_content: str,
        settlement_result: Dict[str, Any],
        analyst_results: List[Dict[str, Any]],
        decisions: Dict[str, Dict],
    ) -> Optional[Dict[str, str]]:
        """Use an LLM to generate a specific policy update for an agent."""

        # Load current policy
        try:
            current_policy = self.workspace_manager.load_agent_file(
                config_name=self.config_name,
                agent_id=agent_id,
                filename="POLICY.md"
            )
        except Exception:
            current_policy = "No existing policy found."

        prompt = f"""
As an Expert Meta-Optimizer for a multi-agent trading system, your task is to update the operational POLICY for an agent named '{agent_id}' based on recent performance failures.

[Current Context]
Date: {date}
Daily Reflection:
{reflection_content}

[Agent's Current POLICY.md]
{current_policy}

[Task]
Analyze why the system failed (loss occurred). Identify what '{agent_id}' could have done differently or what new constraint/heuristic should be added to its policy to prevent similar mistakes in the future.

Provide a specific, concise addition or modification to the POLICY.md file.
The output MUST be a JSON object with:
1. "reasoning": Brief explanation of why this change is needed.
2. "file": Always "POLICY.md".
3. "change": The EXACT markdown text to APPEND or REPLACE in the file. Keep it in Chinese as the system uses Chinese prompts.

Output ONLY the JSON object.
"""
        msg = Msg(name="system", content=prompt, role="user")
        response = await self.model.reply(msg)

        content = response.content
        if isinstance(content, list):
            content = content[0].get("text", "")

        # Clean JSON if wrapped in markdown
        if "```json" in content:
            content = content.split("```json")[1].split("```")[0].strip()

        try:
            return json.loads(content)
        except Exception as e:
            logger.error(f"APO: Failed to parse optimization response for {agent_id}: {e}")
            return None

    def _apply_update(self, agent_id: str, update: Dict[str, str]) -> None:
        """Apply the suggested update to the agent's workspace."""
        filename = update.get("file", "POLICY.md")
        change = update.get("change", "")

        if not change:
            return

        try:
            current_content = self.workspace_manager.load_agent_file(
                config_name=self.config_name,
                agent_id=agent_id,
                filename=filename
            )

            # Check if change is already there to avoid duplicates
            if change.strip() in current_content:
                logger.info(f"APO: Change already present in {agent_id}/{filename}")
                return

            new_content = current_content + "\n\n### APO Update (" + datetime.now().strftime("%Y-%m-%d") + ")\n" + change

            self.workspace_manager.update_agent_file(
                config_name=self.config_name,
                agent_id=agent_id,
                filename=filename,
                content=new_content
            )
            logger.info(f"APO: Updated {agent_id}/{filename} with new heuristics.")
        except Exception as e:
            logger.error(f"APO: Failed to apply update to {agent_id}/{filename}: {e}")
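The optimizer's contract with the LLM is the JSON object described in the prompt above. A response that `_generate_policy_update` would accept looks like this (illustrative content only; the `change` text is in Chinese per the prompt's own instruction):

```python
# Illustrative payload the optimizer expects to json.loads():
expected = {
    "reasoning": "High-confidence long calls ignored deteriorating market breadth.",
    "file": "POLICY.md",
    "change": "- 当市场宽度指标连续两日恶化时,将新开多头仓位的上限降低 50%。",
}
```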
@@ -6,11 +6,13 @@ Core Pipeline - Orchestrates multi-agent analysis and decision-making
# flake8: noqa: E501
# pylint: disable=W0613,C0301

import asyncio
import json
import logging
import os
import re
from contextlib import nullcontext
from datetime import datetime
from pathlib import Path
from typing import Any, Awaitable, Callable, Dict, List, Optional

@@ -32,7 +34,9 @@ from backend.agents.toolkit_factory import create_agent_toolkit
from backend.agents.workspace_manager import WorkspaceManager
from backend.agents.prompt_loader import get_prompt_loader
from backend.llm.models import get_agent_formatter, get_agent_model
from backend.config.constants import ANALYST_TYPES
from backend.config.constants import ANALYST_TYPES, AGENT_CONFIG
from backend.agents.dynamic_team_types import AnalystConfig
from backend.tools.dynamic_team_tools import DynamicTeamController, set_controller


def _resolve_evo_agent_ids() -> set[str]:
@@ -68,12 +72,10 @@ def _resolve_evo_agent_ids() -> set[str]:
# Team infrastructure imports (graceful import - may not exist yet)
try:
    from backend.agents.team.team_coordinator import TeamCoordinator
    from backend.agents.team.msg_hub import MsgHub as TeamMsgHub
    TEAM_COORD_AVAILABLE = True
except ImportError:
    TEAM_COORD_AVAILABLE = False
    TeamCoordinator = None
    TeamMsgHub = None


logger = logging.getLogger(__name__)
@@ -84,6 +86,9 @@ def _log(msg: str) -> None:
    logger.info(msg)


from backend.core.apo import PolicyOptimizer


class TradingPipeline:
    """
    Trading Pipeline - Orchestrates the complete trading cycle
@@ -98,7 +103,7 @@ class TradingPipeline:

    Real-time updates via StateSync after each agent completes.

    Supports both legacy agent lists and run-scoped agent loading.
    Supports run-scoped EvoAgent loading with workspace-driven configuration.
    """

    def __init__(
@@ -127,7 +132,21 @@ class TradingPipeline:
        self.runtime_manager = runtime_manager
        self._session_key: Optional[str] = None
        self._dynamic_analysts: Dict[str, Any] = {}
        self._dynamic_analyst_configs: Dict[str, AnalystConfig] = {}

        # Initialize APO (Autonomous Policy Optimizer)
        config_name = workspace_id or (runtime_manager.config_name if runtime_manager else "default")
        self.apo = PolicyOptimizer(config_name=config_name)

        # Initialize dynamic team controller and inject into PM
        self._team_controller = DynamicTeamController(
            create_callback=self._create_runtime_analyst,
            remove_callback=self._remove_runtime_analyst,
            get_analysts_callback=lambda: self._all_analysts() + [self.risk_manager, self.pm],
        )
        set_controller(self._team_controller)

        # Backward compatibility: also set individual callbacks if PM expects them
        if hasattr(self.pm, "set_team_controller"):
            self.pm.set_team_controller(
                create_agent_callback=self._create_runtime_analyst,
@@ -150,23 +169,7 @@ class TradingPipeline:
        execute_decisions: bool = True,
    ) -> Dict[str, Any]:
        """
        Run one complete trading cycle

        Args:
            tickers: List of stock tickers
            date: Trading date (YYYY-MM-DD)
            prices: Open prices {ticker: price} (for backtest)
            close_prices: Close prices for settlement (for backtest)
            market_caps: Optional market caps for baseline calculation
            get_open_prices_fn: Async callback to wait for open prices (live mode)
            get_close_prices_fn: Async callback to wait for close prices (live mode)

        For live mode:
        - Analysis runs immediately
        - Execution waits for market open via get_open_prices_fn
        - Settlement waits for market close via get_close_prices_fn

        Each agent's result is broadcast immediately via StateSync.
        Run one complete trading cycle with checkpointing support.
        """
        _log(f"Starting cycle {date} - {len(tickers)} tickers")
        session_key = TradingSessionKey(date=date).key()
@@ -176,12 +179,43 @@
            agents=active_analysts + [self.risk_manager, self.pm],
            session_key=session_key,
        )

        # Load checkpoint if exists
        checkpoint = self._load_checkpoint(session_key)
        checkpoint_data = checkpoint.get("data", {}) if checkpoint else {}
        last_phase = checkpoint.get("phase") if checkpoint else None

        if checkpoint:
            _log(f"Resuming from checkpoint: {last_phase}")
            # Restore state from checkpoint
            analyst_results = checkpoint_data.get("analyst_results", [])
            risk_assessment = checkpoint_data.get("risk_assessment", {})
            self.conference_summary = checkpoint_data.get("conference_summary")
            final_predictions = checkpoint_data.get("final_predictions", [])
            pm_result = checkpoint_data.get("pm_result", {})
            execution_result = checkpoint_data.get("execution_result", {})
            settlement_result = checkpoint_data.get("settlement_result")
            # Prefer prices passed by the caller; fall back to checkpointed ones
            if not prices:
                prices = checkpoint_data.get("prices")
            if not close_prices:
                close_prices = checkpoint_data.get("close_prices")
        else:
            analyst_results = []
            risk_assessment = {}
            self.conference_summary = None
            final_predictions = []
            pm_result = {}
            execution_result = {}
            settlement_result = None

        if self.runtime_manager:
            self.runtime_manager.set_session_key(session_key)
            self._runtime_log_event("cycle:start", {"tickers": tickers, "date": date})
            self._runtime_log_event("cycle:start", {"tickers": tickers, "date": date, "resumed": checkpoint is not None})
            self._runtime_batch_status(active_analysts, "analysis_in_progress")

        # Phase 0: Clear short-term memory to avoid cross-day context pollution
        # Phase 0: Clear memory (only if not resuming or if resuming from very start)
        if not last_phase:
            _log("Phase 0: Clearing memory")
            await self._clear_all_agent_memory()

@@ -196,15 +230,40 @@
                "system",
            ),
        ):
            # Phase 1.1: Analysts (parallel execution with TeamCoordinator)
            # Phase 1.0: PM assesses team coverage and expands if needed
            if not last_phase or last_phase == "cleared":
                _log("Phase 1.0: Team gap assessment")
                await self._run_team_gap_assessment(
                    tickers=tickers,
                    date=date,
                    prices=prices,
                )
                active_analysts = self._get_active_analysts()
                if self.runtime_manager:
                    self._runtime_batch_status(active_analysts, "analysis_in_progress")
                self._save_checkpoint(session_key, "team_assessment", {
                    "prices": prices,
                    "close_prices": close_prices,
                })
                last_phase = "team_assessment"

            # Phase 1.1: Analysts
            if last_phase == "team_assessment":
                _log("Phase 1.1: Analyst analysis (parallel)")
                analyst_results = await self._run_analysts_parallel(
                    tickers,
                    date,
                    active_analysts=active_analysts,
                )
                self._save_checkpoint(session_key, "analysis", {
                    "analyst_results": analyst_results,
                    "prices": prices,
                    "close_prices": close_prices
                })
                last_phase = "analysis"

            # Phase 1.2: Risk Manager
            if last_phase == "analysis":
                _log("Phase 1.2: Risk assessment")
                self._runtime_update_status(self.risk_manager, "risk_assessment")
                risk_assessment = await self._run_risk_manager_with_sync(
@@ -212,8 +271,16 @@
                    date,
                    prices,
                )
                self._save_checkpoint(session_key, "risk_assessment", {
                    "analyst_results": analyst_results,
                    "risk_assessment": risk_assessment,
                    "prices": prices,
                    "close_prices": close_prices
                })
                last_phase = "risk_assessment"

            # Phase 2.1: Conference discussion (within same MsgHub)
            # Phase 2.1: Conference discussion
            if last_phase == "risk_assessment":
                _log("Phase 2.1: Conference discussion")
                conference_summary = await self._run_conference_cycles(
                    tickers=tickers,
@@ -223,28 +290,56 @@
                    risk_assessment=risk_assessment,
                )
                self.conference_summary = conference_summary
                self._save_checkpoint(session_key, "conference", {
                    "analyst_results": analyst_results,
                    "risk_assessment": risk_assessment,
                    "conference_summary": conference_summary,
                    "prices": prices,
                    "close_prices": close_prices
                })
                last_phase = "conference"

            # Phase 2.2: Analysts generate final structured predictions
            if last_phase == "conference":
                _log("Phase 2.2: Analysts generate final structured predictions")
                final_predictions = await self._collect_final_predictions(
                    tickers,
                    date,
                    active_analysts=active_analysts,
                )
                self._save_checkpoint(session_key, "predictions", {
                    "analyst_results": analyst_results,
                    "risk_assessment": risk_assessment,
                    "conference_summary": conference_summary,
                    "final_predictions": final_predictions,
                    "prices": prices,
                    "close_prices": close_prices
                })
                last_phase = "predictions"

            # Record final predictions for leaderboard ranking
            if self.settlement_coordinator:
            # Record final predictions
            if last_phase == "predictions" and self.settlement_coordinator:
                self.settlement_coordinator.record_analyst_predictions(
                    final_predictions,
                )

            # Live mode: wait for market open before execution
            if get_open_prices_fn:
            # Live mode: wait for market open
            if not prices and get_open_prices_fn:
                _log("Waiting for market open...")
                prices = await get_open_prices_fn()
                _log(f"Got open prices: {prices}")
                # Update prices in checkpoint if we just got them
                self._save_checkpoint(session_key, "predictions", {
                    "analyst_results": analyst_results,
                    "risk_assessment": risk_assessment,
                    "conference_summary": conference_summary,
                    "final_predictions": final_predictions,
                    "prices": prices,
                    "close_prices": close_prices
                })

            # Phase 3: PM makes decisions
            if last_phase == "predictions":
                _log("Phase 3.1: PM makes decisions")
                self._runtime_update_status(self.pm, "decision_phase")
                pm_result = await self._run_pm_with_sync(
@@ -254,12 +349,26 @@
                    analyst_results,
                    risk_assessment,
                )
                self._save_checkpoint(session_key, "decisions", {
                    "analyst_results": analyst_results,
                    "risk_assessment": risk_assessment,
                    "conference_summary": conference_summary,
                    "final_predictions": final_predictions,
                    "pm_result": pm_result,
                    "prices": prices,
                    "close_prices": close_prices
                })
                last_phase = "decisions"

        decisions = pm_result.get("decisions", {})
        # Outside MsgHub for execution and settlement
        decisions = pm_result.get("decisions", {}) if pm_result else {}
        if not execution_result:
            execution_result = {
                "executed_trades": [],
                "portfolio": self.pm.get_portfolio_state(),
            }

        if last_phase == "decisions":
            if execute_decisions:
                _log("Phase 4: Executing trades")
                self._runtime_update_status(self.pm, "executing")
@@ -267,14 +376,37 @@
            else:
                _log("Phase 4: Skipping trade execution")

            # Live mode: wait for market close before settlement
            if get_close_prices_fn:
            self._save_checkpoint(session_key, "execution", {
                "analyst_results": analyst_results,
                "risk_assessment": risk_assessment,
                "conference_summary": conference_summary,
                "final_predictions": final_predictions,
                "pm_result": pm_result,
                "execution_result": execution_result,
                "prices": prices,
                "close_prices": close_prices
            })
            last_phase = "execution"

        # Live mode: wait for market close
        if not close_prices and get_close_prices_fn:
            _log("Waiting for market close")
            close_prices = await get_close_prices_fn()
            _log(f"Got close prices: {close_prices}")
            # Update close_prices in checkpoint
            self._save_checkpoint(session_key, "execution", {
                "analyst_results": analyst_results,
                "risk_assessment": risk_assessment,
                "conference_summary": conference_summary,
                "final_predictions": final_predictions,
                "pm_result": pm_result,
                "execution_result": execution_result,
                "prices": prices,
                "close_prices": close_prices
            })

        # Phase 5: Settlement - run after close prices available
        settlement_result = None
        # Phase 5: Settlement
        if last_phase == "execution":
            if close_prices and self.settlement_coordinator:
                _log("Phase 5: Daily review and generate memories")
                self._runtime_batch_status(
@@ -316,6 +448,19 @@
                    "reflection",
                )

            self._save_checkpoint(session_key, "settlement", {
                "analyst_results": analyst_results,
                "risk_assessment": risk_assessment,
                "conference_summary": conference_summary,
                "final_predictions": final_predictions,
                "pm_result": pm_result,
                "execution_result": execution_result,
                "settlement_result": settlement_result,
                "prices": prices,
                "close_prices": close_prices
            })
            last_phase = "settlement"

        _log(f"Cycle complete: {date}")
        self._runtime_batch_status(
            self._all_analysts() + [self.risk_manager, self.pm],
@@ -323,6 +468,11 @@
        )
        self._runtime_log_event("cycle:end", {"tickers": tickers, "date": date})

        # Optional: Clean up checkpoint after successful completion
        # path = self._get_checkpoint_path(session_key)
        # if path and path.exists():
        #     path.unlink()

        return {
            "analyst_results": analyst_results,
            "risk_assessment": risk_assessment,
@@ -385,6 +535,44 @@
        await self.risk_manager.memory.clear()
        await self.pm.memory.clear()

    def _get_checkpoint_path(self, session_key: str) -> Optional[Path]:
        """Get the path to the pipeline checkpoint file."""
        if not self.runtime_manager or not self.runtime_manager.run_dir:
            return None
        checkpoint_dir = self.runtime_manager.run_dir / "state" / "checkpoints"
        checkpoint_dir.mkdir(parents=True, exist_ok=True)
        return checkpoint_dir / f"pipeline_{session_key}.json"

    def _save_checkpoint(self, session_key: str, phase: str, data: Dict[str, Any]) -> None:
        """Save the current pipeline state to a checkpoint file."""
        path = self._get_checkpoint_path(session_key)
        if not path:
            return

        checkpoint = {
            "session_key": session_key,
            "phase": phase,
            "timestamp": datetime.now().isoformat(),
            "data": data
        }
        try:
            path.write_text(json.dumps(checkpoint, ensure_ascii=False, indent=2, default=str), encoding="utf-8")
            _log(f"Checkpoint saved: {phase} for {session_key}")
        except Exception as e:
            logger.error(f"Failed to save checkpoint: {e}")

    def _load_checkpoint(self, session_key: str) -> Optional[Dict[str, Any]]:
        """Load the pipeline state from a checkpoint file."""
        path = self._get_checkpoint_path(session_key)
        if not path or not path.exists():
            return None

        try:
            return json.loads(path.read_text(encoding="utf-8"))
        except Exception as e:
            logger.error(f"Failed to load checkpoint: {e}")
            return None
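Taken together, a checkpoint lives at `runs/<run_id>/state/checkpoints/pipeline_<session_key>.json` and carries the four top-level keys written by `_save_checkpoint`. A sketch of one (values invented; the phase vocabulary is the one `run_cycle` advances through above):

```python
# Phases in the order the cycle advances through them:
PHASES = ("team_assessment", "analysis", "risk_assessment", "conference",
          "predictions", "decisions", "execution", "settlement")

# Invented example of a checkpoint written after the risk phase:
checkpoint = {
    "session_key": "2024-01-15",          # exact format depends on TradingSessionKey
    "phase": "risk_assessment",
    "timestamp": "2024-01-15T10:02:11",
    "data": {
        "analyst_results": [...],
        "risk_assessment": {...},
        "prices": {"AAPL": 185.2},
        "close_prices": None,
    },
}
```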
    async def _sync_memory_if_retrieved(self, agent: Any) -> None:
        """
        Check agent's short-term memory for retrieved long-term memory and sync to frontend.
@@ -583,8 +771,28 @@
            await self.state_sync.on_agent_complete(
                agent_id="Daily Log",
                content=reflection_content,
                agent_name="每日记录",
            )

        # Phase 6: APO (Autonomous Policy Optimization)
        # If the day was a loss, let APO suggest and apply policy updates.
        if hasattr(self, "apo") and self.apo:
            _log(f"Phase 6: APO - Running autonomous policy optimization for {date}")
            try:
                apo_result = await self.apo.run_optimization(
                    date=date,
                    reflection_content=reflection_content,
                    settlement_result=settlement_result or {"portfolio_value": 100000.0 + total_pnl},
                    analyst_results=analyst_results,
                    decisions=decisions
                )
                if apo_result.get("status") == "completed":
                    _log(f"APO: Successfully applied {len(apo_result.get('optimizations', []))} policy updates.")
                    # Reload assets for next cycle to ensure they are picked up
                    self.reload_runtime_assets()
            except Exception as e:
                logger.error(f"APO: Optimization failed: {e}")

    def _build_reflection_content(
        self,
        date: str,
@@ -830,12 +1038,13 @@

        pm_msg = Msg(name="system", content=pm_prompt, role="user")
        pm_response = await self.pm.reply(pm_msg)
        pm_content = self._extract_text_content(pm_response.content)

        if self.state_sync:
            pm_content = self._extract_text_content(pm_response.content)
            await self.state_sync.on_conference_message(
                agent_id="portfolio_manager",
                content=pm_content,
                agent_name=self._resolve_agent_display_name("portfolio_manager"),
            )

        # Analysts share perspectives (supports per-round active team updates)
@@ -860,6 +1069,7 @@
            await self.state_sync.on_conference_message(
                agent_id=analyst.name,
                content=analyst_content,
                agent_name=self._resolve_agent_display_name(analyst.name),
            )

        if self.state_sync:
@@ -892,6 +1102,7 @@
            await self.state_sync.on_conference_message(
                agent_id="conference summary",
                content=conference_summary,
                agent_name="会议总结",
            )
            await self.state_sync.on_conference_end()

@@ -949,6 +1160,116 @@
            f"and any remaining concerns about {', '.join(tickers)}."
        )
async def _run_team_gap_assessment(
|
||||
self,
|
||||
*,
|
||||
tickers: List[str],
|
||||
date: str,
|
||||
prices: Optional[Dict[str, float]],
|
||||
) -> str:
|
||||
active_analysts = self._get_active_analysts()
|
||||
team_summary = [
|
||||
{
|
||||
"agent_id": analyst.name,
|
||||
"display_name": self._resolve_agent_display_name(analyst.name),
|
||||
}
|
||||
for analyst in active_analysts
|
||||
]
|
||||
prompt = (
|
||||
f"As Portfolio Manager, perform a team coverage assessment before analysis for {date}.\n\n"
|
||||
f"Tickers: {', '.join(tickers)}\n"
|
||||
f"Current team: {json.dumps(team_summary, ensure_ascii=False, indent=2)}\n"
|
||||
f"Current prices snapshot: {json.dumps(prices, ensure_ascii=False, indent=2) if prices else 'N/A'}\n\n"
|
||||
"Your job in this phase is not to make investment decisions. "
|
||||
"First decide whether the current team has enough domain coverage. "
|
||||
"If the team is insufficient, immediately call dynamic team tools to create or clone the needed analysts now. "
|
||||
"Before creating any analyst, explicitly check whether an existing analyst already covers that role. "
|
||||
"Do not create duplicate roles with different IDs but the same responsibilities. "
|
||||
"If the current team is sufficient, explicitly say the current team is sufficient and explain why."
|
||||
)
|
||||
msg = Msg(name="system", content=prompt, role="user")
|
||||
response = await self.pm.reply(msg)
|
||||
pm_content = self._extract_text_content(response.content)
|
||||
enforced_pm_content = await self._enforce_pm_team_expansion_if_needed(
|
||||
tickers=tickers,
|
||||
date=date,
|
||||
pm_content=pm_content,
|
||||
)
|
||||
if enforced_pm_content:
|
||||
pm_content = enforced_pm_content
|
||||
|
||||
if self.state_sync:
|
||||
await self.state_sync.on_agent_complete(
|
||||
agent_id="portfolio_manager",
|
||||
agent_name=self._resolve_agent_display_name("portfolio_manager"),
|
||||
content=pm_content,
|
||||
)
|
||||
|
||||
return pm_content
|
||||
|
||||
def _pm_requests_team_expansion(self, text: str) -> bool:
|
||||
normalized = (text or "").strip().lower()
|
||||
if not normalized:
|
||||
return False
|
||||
|
||||
phrases = [
|
||||
"创建",
|
||||
"新增分析师",
|
||||
"补充分析师",
|
||||
"扩编团队",
|
||||
"需要行业分析师",
|
||||
"需要量化分析师",
|
||||
"需要宏观分析师",
|
||||
"需要补充",
|
||||
"先扩编",
|
||||
"create analyst",
|
||||
"create a new analyst",
|
||||
"add analyst",
|
||||
"expand the team",
|
||||
"need a specialist",
|
||||
"need another analyst",
|
||||
]
|
||||
return any(phrase in normalized for phrase in phrases)
|
||||
|
||||
async def _enforce_pm_team_expansion_if_needed(
|
||||
self,
|
||||
*,
|
||||
tickers: List[str],
|
||||
date: str,
|
||||
pm_content: str,
|
||||
) -> Optional[str]:
|
||||
if not self._pm_requests_team_expansion(pm_content):
|
||||
return None
|
||||
|
||||
before_ids = {agent.name for agent in self._get_active_analysts()}
|
||||
|
||||
followup_prompt = (
|
||||
f"You identified a team coverage gap for {date} across {', '.join(tickers)}. "
|
||||
"This is still the pre-analysis team assessment phase. "
|
||||
"Do not merely recommend adding analysts. If additional analysts are needed, "
|
||||
"you must now call the dynamic team tools (`create_analyst` or `clone_analyst`) "
|
||||
"to add the required specialists before analyst analysis begins. "
|
||||
"Only after the tool call succeeds may you explain why the new analysts were added. "
|
||||
"If you truly believe the current team is sufficient, explicitly say the current team is sufficient."
|
||||
)
|
||||
followup_msg = Msg(name="system", content=followup_prompt, role="user")
|
||||
followup_response = await self.pm.reply(followup_msg)
|
||||
followup_content = self._extract_text_content(followup_response.content)
|
||||
after_ids = {agent.name for agent in self._get_active_analysts()}
|
||||
|
||||
if after_ids != before_ids:
|
||||
created = sorted(after_ids - before_ids)
|
||||
logger.info(
|
||||
"PM team expansion enforced successfully before analysis; added analysts=%s",
|
||||
created,
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"PM mentioned expansion in team assessment but did not add analysts after enforcement prompt",
|
||||
)
|
||||
|
||||
return followup_content
|
||||
|
||||
def _build_analyst_discussion_prompt(
|
||||
self,
|
||||
cycle: int,
|
||||
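The expansion check above is a plain substring heuristic over the lowercased PM reply, so it is cheap but wording-sensitive. A minimal sketch of how it behaves, assuming `pipeline` is a constructed TradingPipeline (the sample replies are hypothetical):

assert pipeline._pm_requests_team_expansion("We should create analyst coverage for semis")
assert pipeline._pm_requests_team_expansion("建议扩编团队，需要宏观分析师")
assert not pipeline._pm_requests_team_expansion("The current team is sufficient.")

Because the enforcement helper compares agent-id sets before and after the follow-up reply, it verifies the tool call's actual side effect rather than trusting the model's claim that it expanded the team.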
@@ -962,6 +1283,88 @@ class TradingPipeline:
            f"Do not use tools - focus on sharing your professional opinion."
        )

    def _resolve_agent_display_name(self, agent_id: str) -> str:
        runtime_name = None
        if self.runtime_manager:
            state = self.runtime_manager.get_agent_state(agent_id)
            runtime_name = getattr(state, "display_name", None) if state else None
        if isinstance(runtime_name, str) and runtime_name.strip():
            return runtime_name.strip()

        static_name = AGENT_CONFIG.get(agent_id, {}).get("name")
        if isinstance(static_name, str) and static_name.strip():
            return static_name.strip()

        profile_path = Path(__file__).resolve().parents[2] / "runs" / self.runtime_manager.config_name / "agents" / agent_id / "PROFILE.md" if self.runtime_manager else None
        if profile_path and profile_path.exists():
            try:
                raw = profile_path.read_text(encoding="utf-8")
                for line in raw.splitlines():
                    text = line.strip()
                    if text.startswith("角色定位:"):
                        value = text.split(":", 1)[1].strip()
                        if value:
                            return value
            except Exception:
                pass

        return agent_id

    @staticmethod
    def _normalize_role_key(value: str) -> str:
        normalized = (value or "").strip().lower()
        normalized = normalized.replace("_", "")
        normalized = normalized.replace(" ", "")
        replacements = {
            "analyst": "分析师",
            "macro": "宏观",
            "technical": "技术",
            "tech": "技术",
            "sentiment": "情绪",
            "fundamentals": "基本面",
            "fundamental": "基本面",
            "valuation": "估值",
            "crypto": "加密",
            "cryptocurrency": "加密",
            "semiconductor": "半导体",
            "industry": "行业",
            "sector": "行业",
            "risk": "风险",
        }
        for src, target in replacements.items():
            normalized = normalized.replace(src, target)
        return normalized

    @staticmethod
    def _contains_cjk(value: str) -> bool:
        text = (value or "").strip()
        return any("\u4e00" <= ch <= "\u9fff" for ch in text)

    def _find_similar_existing_analyst(
        self,
        *,
        agent_id: str,
        analyst_type: str,
        custom_config: Optional[AnalystConfig],
    ) -> Optional[str]:
        requested_names = {self._normalize_role_key(agent_id)}
        if custom_config and custom_config.persona and custom_config.persona.name:
            requested_names.add(self._normalize_role_key(custom_config.persona.name))

        for agent in self._all_analysts():
            existing_id = getattr(agent, "name", None) or getattr(agent, "agent_id", None)
            if not existing_id or existing_id == agent_id:
                continue

            existing_names = {
                self._normalize_role_key(existing_id),
                self._normalize_role_key(self._resolve_agent_display_name(existing_id)),
            }
            if requested_names & existing_names:
                return existing_id

        return None

    async def _collect_final_predictions(
        self,
        tickers: List[str],
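The role-key normalization folds English role tokens onto their Chinese equivalents so that duplicate detection compares like with like. A standalone sketch of the same idea, trimmed to two mapping entries:

def normalize_role_key(value: str) -> str:
    # Mirrors the method above: lowercase, strip separators, then map
    # English role tokens onto their Chinese equivalents.
    normalized = (value or "").strip().lower().replace("_", "").replace(" ", "")
    for src, target in {"technical": "技术", "analyst": "分析师"}.items():
        normalized = normalized.replace(src, target)
    return normalized

# "Technical Analyst", "technical_analyst" and "技术分析师" all collapse to the
# same key, so _find_similar_existing_analyst treats them as one role.
assert normalize_role_key("Technical Analyst") == normalize_role_key("技术分析师")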
@@ -1110,6 +1513,7 @@ class TradingPipeline:
                await self.state_sync.on_agent_complete(
                    agent_id=analyst.name,
                    content=text_content,
                    agent_name=self._resolve_agent_display_name(analyst.name),
                )

        return results
@@ -1185,6 +1589,7 @@ class TradingPipeline:
                await self.state_sync.on_agent_complete(
                    agent_id=analyst.name,
                    content=text_content,
                    agent_name=self._resolve_agent_display_name(analyst.name),
                )
            else:
                logger.warning(
@@ -1266,6 +1671,7 @@ class TradingPipeline:
            await self.state_sync.on_agent_complete(
                agent_id="risk_manager",
                content=text_content,
                agent_name=self._resolve_agent_display_name("risk_manager"),
            )

        return extracted
@@ -1352,6 +1758,7 @@ class TradingPipeline:
            await self.state_sync.on_agent_complete(
                agent_id="portfolio_manager",
                content=text_content,
                agent_name=self._resolve_agent_display_name("portfolio_manager"),
            )

        return extracted
@@ -1562,28 +1969,95 @@ class TradingPipeline:
        """Return static analysts plus runtime-created analysts."""
        return list(self.analysts) + list(self._dynamic_analysts.values())

-   def _create_runtime_analyst(self, agent_id: str, analyst_type: str) -> str:
-       """Create one runtime analyst instance."""
-       if analyst_type not in ANALYST_TYPES:
+   def _create_runtime_analyst(
+       self,
+       agent_id: str,
+       analyst_type: str,
+       custom_config: Optional[AnalystConfig] = None,
+   ) -> str:
+       """Create one runtime analyst instance.
+
+       Args:
+           agent_id: Unique identifier for the new analyst
+           analyst_type: Type of analyst (e.g., "technical_analyst")
+           custom_config: Optional custom configuration for the analyst,
+               including persona, soul_md, agents_md, etc.
+
+       Returns:
+           Success or error message
+       """
+       # Validate analyst_type or custom_config
+       if analyst_type not in ANALYST_TYPES and not custom_config:
            return (
                f"Unknown analyst_type '{analyst_type}'. "
-               f"Available: {', '.join(ANALYST_TYPES.keys())}"
+               f"Available: {', '.join(ANALYST_TYPES.keys())}. "
+               f"Or provide custom_config to create a custom analyst."
            )
        display_name = (
            custom_config.persona.name
            if custom_config and custom_config.persona and custom_config.persona.name
            else ""
        )
        if not self._contains_cjk(display_name):
            return (
                f"Analyst '{agent_id}' requires a Chinese display name. "
                "Please provide `name` in Chinese characters when creating dynamic analysts."
            )
        if agent_id in {agent.name for agent in self._all_analysts()}:
            return f"Analyst '{agent_id}' already exists."
        similar_existing = self._find_similar_existing_analyst(
            agent_id=agent_id,
            analyst_type=analyst_type,
            custom_config=custom_config,
        )
        if similar_existing:
            return (
                f"Analyst '{agent_id}' is too similar to existing analyst "
                f"'{similar_existing}'. Reuse or clone the existing analyst instead of "
                f"creating a duplicate role."
            )

        config_name = getattr(self.pm, "config", {}).get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]

        # Get persona: use custom_config if provided, else load from personas.yaml
        if custom_config and custom_config.persona:
            persona = {
                "name": custom_config.persona.name,
                "focus": custom_config.persona.focus,
                "description": custom_config.persona.description,
            }
        else:
            personas = get_prompt_loader().load_yaml_config("analyst", "personas")
            persona = personas.get(analyst_type, {})
        workspace_manager = WorkspaceManager(project_root=project_root)

        # Build file contents: use custom if provided, else generate from persona
        file_contents = {}
        if custom_config:
            if custom_config.soul_md:
                file_contents["SOUL.md"] = custom_config.soul_md
            if custom_config.agents_md:
                file_contents["AGENTS.md"] = custom_config.agents_md
            if custom_config.profile_md:
                file_contents["PROFILE.md"] = custom_config.profile_md
            if custom_config.bootstrap_md:
                file_contents["BOOTSTRAP.md"] = custom_config.bootstrap_md

        # Fill in any missing files with defaults
        if not file_contents or len(file_contents) < 4:
            default_files = workspace_manager.build_default_agent_files(
                agent_id=agent_id,
                persona=persona,
            )
            for key, value in default_files.items():
                if key not in file_contents:
                    file_contents[key] = value

        workspace_manager.ensure_agent_assets(
            config_name=config_name,
            agent_id=agent_id,
-           file_contents=workspace_manager.build_default_agent_files(
-               agent_id=agent_id,
-               persona=persona,
-           ),
+           file_contents=file_contents,
        )

        # Create EvoAgent with workspace-driven configuration
@@ -1594,11 +2068,23 @@ class TradingPipeline:
            agent_id,
        )
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        # Support model override from custom_config
        if custom_config and custom_config.model_name:
            # Import create_model for custom model creation
            from backend.llm.models import create_model
            # Use specified model name, default to openai provider
            model = create_model(
                model_name=custom_config.model_name,
                model_provider=custom_config.memory_config.get("model_provider", "openai") if custom_config.memory_config else "openai"
            )
        else:
            model = get_agent_model(analyst_type)

        agent = EvoAgent(
            agent_id=agent_id,
            config_name=config_name,
            workspace_dir=workspace_dir,
-           model=get_agent_model(analyst_type),
+           model=model,
            formatter=get_agent_formatter(analyst_type),
            prompt_files=agent_config.prompt_files,
        )
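For reference, the override path above can be driven end-to-end from a custom config. A hypothetical invocation; the field names follow how `custom_config` is consumed in these hunks, and `AnalystPersona` is a stand-in name for the nested persona model, not a confirmed class:

config = AnalystConfig(
    persona=AnalystPersona(  # hypothetical class name for the nested persona
        name="半导体分析师",
        focus="semiconductor supply chains",
        description="Covers fabs, equipment, and memory pricing.",
    ),
    model_name="deepseek-chat",  # illustrative model id
    memory_config={"model_provider": "dashscope"},
)
pipeline._create_runtime_analyst(
    "semiconductor_analyst",
    "technical_analyst",
    custom_config=config,
)

Note the Chinese `name` is required: `_contains_cjk` rejects dynamic analysts whose display name has no CJK characters.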
@@ -1611,6 +2097,53 @@ class TradingPipeline:
        # Keep workspace_id for backward compatibility
        setattr(agent, "workspace_id", config_name)
        self._dynamic_analysts[agent_id] = agent

        if self.runtime_manager:
            display_name = None
            if custom_config and custom_config.persona and custom_config.persona.name:
                display_name = custom_config.persona.name
            self.runtime_manager.register_agent(
                agent_id,
                display_name=display_name,
            )
            self.runtime_manager.log_event(
                "agent:created",
                {
                    "agent_id": agent_id,
                    "analyst_type": analyst_type,
                    "display_name": display_name,
                },
            )
        logger.info(
            "Dynamic analyst created: agent_id=%s analyst_type=%s custom=%s",
            agent_id,
            analyst_type,
            bool(custom_config),
        )
        if self.state_sync:
            try:
                asyncio.create_task(
                    self.state_sync.emit(
                        {
                            "type": "runtime_agents_updated",
                            "action": "created",
                            "agentId": agent_id,
                            "agentName": display_name or self._resolve_agent_display_name(agent_id),
                        },
                        persist=False,
                    )
                )
            except Exception as exc:
                logger.warning(
                    "Failed to broadcast runtime_agents_updated(create) for %s: %s",
                    agent_id,
                    exc,
                )

        # Store custom config for future reference (e.g., cloning)
        if custom_config:
            self._dynamic_analyst_configs[agent_id] = custom_config

        update_active_analysts(
            project_root=project_root,
            config_name=config_name,
@@ -1624,6 +2157,33 @@ class TradingPipeline:
        if agent_id not in self._dynamic_analysts:
            return f"Runtime analyst '{agent_id}' not found."
        self._dynamic_analysts.pop(agent_id, None)
        # Also remove stored config if exists
        self._dynamic_analyst_configs.pop(agent_id, None)
        if self.runtime_manager:
            self.runtime_manager.unregister_agent(agent_id)
            self.runtime_manager.log_event(
                "agent:removed",
                {"agent_id": agent_id},
            )
        logger.info("Dynamic analyst removed: agent_id=%s", agent_id)
        if self.state_sync:
            try:
                asyncio.create_task(
                    self.state_sync.emit(
                        {
                            "type": "runtime_agents_updated",
                            "action": "removed",
                            "agentId": agent_id,
                        },
                        persist=False,
                    )
                )
            except Exception as exc:
                logger.warning(
                    "Failed to broadcast runtime_agents_updated(remove) for %s: %s",
                    agent_id,
                    exc,
                )
        config_name = getattr(self.pm, "config", {}).get("config_name", "default")
        project_root = Path(__file__).resolve().parents[2]
        update_active_analysts(
@@ -4,6 +4,12 @@ Pipeline Runner - Independent trading pipeline execution

This module provides functions to start/stop trading pipelines
that can be called from the REST API.

COMPATIBILITY_NOTE:
This module still carries selected fallback creation paths used by managed
runtime startup and compatibility flows. New runtime behavior should be judged
against the run-scoped helpers and current pipeline selection rules rather than
assuming every constructor here is the long-term default.
"""

from __future__ import annotations
@@ -11,6 +17,7 @@ from __future__ import annotations
import asyncio
import os
from contextlib import AsyncExitStack
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Callable

@@ -22,7 +29,7 @@ from backend.agents.prompt_loader import get_prompt_loader
from backend.agents.workspace_manager import WorkspaceManager
from backend.config.constants import ANALYST_TYPES
from backend.core.pipeline import TradingPipeline
-from backend.core.scheduler import BacktestScheduler, Scheduler
+from backend.core.scheduler import BacktestScheduler, Scheduler, normalize_schedule_mode
from backend.llm.models import get_agent_formatter, get_agent_model
from backend.runtime.manager import (
    TradingRuntimeManager,
@@ -46,6 +53,21 @@ _gateway_instance: Optional[Gateway] = None
_long_term_memories: List[Any] = []


@dataclass
class GatewayRuntimeBundle:
    """Assembled runtime components for a Gateway-backed execution path."""

    runtime_manager: TradingRuntimeManager
    market_service: MarketService
    storage_service: StorageService
    pipeline: TradingPipeline
    gateway: Gateway
    scheduler: Optional[Scheduler]
    scheduler_callback: Optional[Callable]
    long_term_memories: List[Any]
    trading_dates: List[str]


def _set_gateway(gateway: Optional[Gateway]) -> None:
    """Set global gateway reference."""
    global _gateway_instance
@@ -443,6 +465,151 @@ def create_agents(
    return analysts, risk_manager, portfolio_manager, long_term_memories


def build_gateway_runtime_bundle(
    *,
    run_id: str,
    run_dir: Path,
    bootstrap: Dict[str, Any],
    poll_interval: int = 10,
) -> GatewayRuntimeBundle:
    """Build the full Gateway runtime component graph for one run."""
    tickers = bootstrap.get("tickers", ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "META", "TSLA", "AMD", "NFLX", "AVGO", "PLTR", "COIN"])
    initial_cash = float(bootstrap.get("initial_cash", 100000.0))
    margin_requirement = float(bootstrap.get("margin_requirement", 0.0))
    max_comm_cycles = int(bootstrap.get("max_comm_cycles", 2))
    schedule_mode = normalize_schedule_mode(bootstrap.get("schedule_mode", "daily"))
    trigger_time = bootstrap.get("trigger_time", "09:30")
    interval_minutes = int(bootstrap.get("interval_minutes", 60))
    heartbeat_interval = int(bootstrap.get("heartbeat_interval", 0))
    mode = bootstrap.get("mode", "live")
    start_date = bootstrap.get("start_date")
    end_date = bootstrap.get("end_date")
    enable_memory = bootstrap.get("enable_memory", False)

    is_backtest = mode == "backtest"

    runtime_manager = TradingRuntimeManager(
        config_name=run_id,
        run_dir=run_dir,
        bootstrap=bootstrap,
    )
    runtime_manager.prepare_run()

    market_service = MarketService(
        tickers=tickers,
        poll_interval=poll_interval,
        backtest_mode=is_backtest,
        api_key=os.getenv("FINNHUB_API_KEY") if not is_backtest else None,
        backtest_start_date=start_date if is_backtest else None,
        backtest_end_date=end_date if is_backtest else None,
    )

    storage_service = StorageService(
        dashboard_dir=run_dir / "team_dashboard",
        initial_cash=initial_cash,
        config_name=run_id,
    )
    if not storage_service.files["summary"].exists():
        storage_service.initialize_empty_dashboard()
    else:
        storage_service.update_leaderboard_model_info()

    analysts, risk_manager, pm, long_term_memories = create_agents(
        run_id=run_id,
        run_dir=run_dir,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        enable_long_term_memory=enable_memory,
    )
    for agent in analysts + [risk_manager, pm]:
        agent_id = getattr(agent, "agent_id", None) or getattr(agent, "name", None)
        if agent_id:
            runtime_manager.register_agent(agent_id)

    portfolio_state = storage_service.load_portfolio_state()
    pm.load_portfolio_state(portfolio_state)

    settlement_coordinator = SettlementCoordinator(
        storage=storage_service,
        initial_capital=initial_cash,
    )
    pipeline = TradingPipeline(
        analysts=analysts,
        risk_manager=risk_manager,
        portfolio_manager=pm,
        settlement_coordinator=settlement_coordinator,
        max_comm_cycles=max_comm_cycles,
        runtime_manager=runtime_manager,
    )

    scheduler_callback = None
    live_scheduler = None
    trading_dates: List[str] = []

    if is_backtest:
        backtest_scheduler = BacktestScheduler(
            start_date=start_date,
            end_date=end_date,
            trading_calendar="NYSE",
            delay_between_days=0.5,
        )
        trading_dates = backtest_scheduler.get_trading_dates()

        async def scheduler_callback_fn(callback):
            await backtest_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn
    else:
        live_scheduler = Scheduler(
            mode=schedule_mode,
            trigger_time=trigger_time,
            interval_minutes=interval_minutes,
            heartbeat_interval=heartbeat_interval if heartbeat_interval > 0 else None,
            config={"config_name": run_id},
        )

        async def scheduler_callback_fn(callback):
            await live_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn

    gateway = Gateway(
        market_service=market_service,
        storage_service=storage_service,
        pipeline=pipeline,
        scheduler_callback=scheduler_callback,
        config={
            "mode": mode,
            "backtest_mode": is_backtest,
            "tickers": tickers,
            "config_name": run_id,
            "schedule_mode": schedule_mode,
            "interval_minutes": interval_minutes,
            "trigger_time": trigger_time,
            "heartbeat_interval": heartbeat_interval,
            "initial_cash": initial_cash,
            "margin_requirement": margin_requirement,
            "max_comm_cycles": max_comm_cycles,
            "enable_memory": enable_memory,
        },
        scheduler=live_scheduler,
    )
    if is_backtest:
        gateway.set_backtest_dates(trading_dates)

    return GatewayRuntimeBundle(
        runtime_manager=runtime_manager,
        market_service=market_service,
        storage_service=storage_service,
        pipeline=pipeline,
        gateway=gateway,
        scheduler=live_scheduler,
        scheduler_callback=scheduler_callback,
        long_term_memories=long_term_memories,
        trading_dates=trading_dates,
    )
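A hypothetical caller sketch for the builder above; the bootstrap keys mirror the `.get()` lookups in this hunk, and the run id, dates, and port are illustrative only:

async def demo() -> None:
    # Assemble one run's component graph, then hand control to the Gateway.
    bundle = build_gateway_runtime_bundle(
        run_id="demo_run",
        run_dir=Path("runs/demo_run"),
        bootstrap={
            "mode": "backtest",
            "start_date": "2024-01-02",
            "end_date": "2024-03-28",
            "tickers": ["AAPL", "NVDA"],
            "initial_cash": 50000.0,
        },
    )
    await bundle.gateway.start(host="127.0.0.1", port=8765)

Returning one frozen bundle keeps the wiring in a single place, so the server entrypoint below can shrink to a thin shell around it.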


async def run_pipeline(
    run_id: str,
    run_dir: Path,
@@ -483,7 +650,7 @@ async def run_pipeline(
    initial_cash = float(bootstrap.get("initial_cash", 100000.0))
    margin_requirement = float(bootstrap.get("margin_requirement", 0.0))
    max_comm_cycles = int(bootstrap.get("max_comm_cycles", 2))
-   schedule_mode = bootstrap.get("schedule_mode", "daily")
+   schedule_mode = normalize_schedule_mode(bootstrap.get("schedule_mode", "daily"))
    trigger_time = bootstrap.get("trigger_time", "09:30")
    interval_minutes = int(bootstrap.get("interval_minutes", 60))
    heartbeat_interval = int(bootstrap.get("heartbeat_interval", 0))
@@ -17,6 +17,14 @@ NYSE_TZ = ZoneInfo("America/New_York")
NYSE_CALENDAR = mcal.get_calendar("NYSE")


def normalize_schedule_mode(mode: str | None) -> str:
    """Normalize schedule mode to the current public vocabulary."""
    value = str(mode or "daily").strip().lower()
    if value == "intraday":
        return "interval"
    return value or "daily"


class Scheduler:
    """
    Market-aware scheduler for live trading.
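The helper is deliberately forgiving: "intraday" is the only legacy alias it rewrites, and everything else passes through lowercased with "daily" as the fallback. A few worked cases:

assert normalize_schedule_mode("intraday") == "interval"
assert normalize_schedule_mode("Daily ") == "daily"
assert normalize_schedule_mode(None) == "daily"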
@@ -31,7 +39,7 @@ class Scheduler:
        heartbeat_interval: Optional[int] = None,
        config: Optional[dict] = None,
    ):
-       self.mode = mode
+       self.mode = normalize_schedule_mode(mode)
        self.trigger_time = trigger_time or "09:30"  # NYSE timezone
        self.trigger_now = self.trigger_time == "now"
        self.interval_minutes = interval_minutes or 60
@@ -107,7 +115,7 @@ class Scheduler:

        if self.mode == "daily":
            self._task = asyncio.create_task(self._run_daily(self._callback))
-       elif self.mode == "intraday":
+       elif self.mode == "interval":
            self._task = asyncio.create_task(
                self._run_intraday(self._callback),
            )
@@ -124,8 +132,13 @@ class Scheduler:
        """Update scheduler parameters in-place and restart its timing loop."""
        changed = False

-       if mode and mode != self.mode:
-           self.mode = mode
+       if mode:
+           normalized_mode = normalize_schedule_mode(mode)
+       else:
+           normalized_mode = None
+
+       if normalized_mode and normalized_mode != self.mode:
+           self.mode = normalized_mode
            changed = True

        if trigger_time and trigger_time != self.trigger_time:
@@ -233,13 +246,13 @@ class Scheduler:
            await callback(date=current_date)

    async def _run_intraday(self, callback: Callable):
-       """Run every N minutes (for future use)"""
+       """Run every N minutes in interval mode."""
        while self.running:
            now = self._now_nyse()
            current_date = now.strftime("%Y-%m-%d")

            if self._is_trading_day(now):
-               logger.info(f"Triggering intraday cycle for {current_date}")
+               logger.info(f"Triggering interval cycle for {current_date}")
                await callback(date=current_date)

            await asyncio.sleep(self.interval_minutes * 60)
@@ -123,6 +123,10 @@ class StateSync:
        # Persist to feed_history
        if persist:
            self.storage.add_feed_message(self._state, event)
        # Make persistence non-blocking to keep event loop snappy
        if asyncio.get_event_loop().is_running():
            asyncio.create_task(asyncio.to_thread(self.save_state))
        else:
            self.save_state()

        # Broadcast to frontend
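The running-loop guard exists because `asyncio.create_task` requires an active event loop; outside one, the code falls back to a synchronous save (`asyncio.get_running_loop()` inside try/except is the more modern probe for the same thing). A self-contained sketch of the offloading idiom; the demo awaits the task so the example is deterministic, whereas the production code deliberately fires and forgets:

import asyncio
import time

def save_state() -> None:
    # Stand-in for StateSync.save_state: blocking file I/O.
    time.sleep(0.1)

async def emit_event() -> None:
    # Push the blocking save onto a worker thread so the event loop
    # stays free to serve websocket traffic in the meantime.
    task = asyncio.create_task(asyncio.to_thread(save_state))
    await task

asyncio.run(emit_event())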
@@ -135,6 +139,7 @@ class StateSync:
        self,
        agent_id: str,
        content: str,
        agent_name: Optional[str] = None,
        **extra,
    ):
        """
@@ -151,6 +156,7 @@ class StateSync:
            {
                "type": "agent_message",
                "agentId": agent_id,
                "agentName": agent_name,
                "content": content,
                "ts": ts_ms,
                **extra,
@@ -212,7 +218,12 @@ class StateSync:
            persist=False,
        )

-   async def on_conference_message(self, agent_id: str, content: str):
+   async def on_conference_message(
+       self,
+       agent_id: str,
+       content: str,
+       agent_name: Optional[str] = None,
+   ):
        """Called when an agent speaks during conference"""
        ts_ms = self._get_timestamp_ms()

@@ -220,6 +231,7 @@ class StateSync:
            {
                "type": "conference_message",
                "agentId": agent_id,
                "agentName": agent_name,
                "content": content,
                "ts": ts_ms,
            },
@@ -190,8 +190,9 @@ class MarketStore:
        name: str | None = None,
        sector: str | None = None,
        is_active: bool = True,
-   ) -> None:
+   ) -> int:
        timestamp = _utc_timestamp()
        count = 0
        with self._connect() as conn:
            conn.execute(
                """
@@ -206,6 +207,8 @@ class MarketStore:
                """,
                (symbol, name, sector, 1 if is_active else 0, timestamp, timestamp),
            )
            count += 1
        return count

    def update_fetch_watermark(
        self,
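These MarketStore writers now report how many rows they touched instead of returning None, which lets callers log or assert on write volume. A minimal standalone sketch of the same idiom against stdlib sqlite3, with a hypothetical one-column schema:

import sqlite3

def upsert_tickers(conn: sqlite3.Connection, symbols: list[str]) -> int:
    # Same shape as the MarketStore changes above: count each executed
    # statement and report the total to the caller.
    count = 0
    for symbol in symbols:
        conn.execute(
            "INSERT INTO tickers(symbol) VALUES (?) "
            "ON CONFLICT(symbol) DO UPDATE SET symbol = excluded.symbol",
            (symbol,),
        )
        count += 1
    return count

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tickers (symbol TEXT PRIMARY KEY)")
assert upsert_tickers(conn, ["AAPL", "MSFT"]) == 2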
@@ -213,8 +216,9 @@ class MarketStore:
        symbol: str,
        price_date: str | None = None,
        news_date: str | None = None,
-   ) -> None:
+   ) -> int:
        timestamp = _utc_timestamp()
        count = 0
        with self._connect() as conn:
            conn.execute(
                """
@@ -227,6 +231,8 @@ class MarketStore:
                """,
                (symbol, timestamp, timestamp, price_date, news_date),
            )
            count += 1
        return count

    def get_ticker_watermarks(self, symbol: str) -> dict[str, Any]:
        with self._connect() as conn:
@@ -263,6 +269,8 @@ class MarketStore:
        count = 0
        with self._connect() as conn:
            for row in rows:
                if not row.get("date"):
                    continue
                conn.execute(
                    """
                    INSERT INTO ohlc
@@ -341,6 +349,7 @@ class MarketStore:
                        timestamp,
                    ),
                )
                count += 1
                for ticker in tickers:
                    conn.execute(
                        """
@@ -349,7 +358,6 @@ class MarketStore:
                        """,
                        (news_id, str(ticker).strip().upper()),
                    )
-                   count += 1
        return count

    def get_news_without_trade_date(self, symbol: str | None = None, *, limit: int = 5000) -> list[dict[str, Any]]:
@@ -928,8 +936,9 @@ class MarketStore:
        as_of_date: str,
        content: str,
        source: str = "local",
-   ) -> None:
+   ) -> int:
        timestamp = _utc_timestamp()
        count = 0
        with self._connect() as conn:
            conn.execute(
                """
@@ -943,6 +952,8 @@ class MarketStore:
                """,
                (symbol, as_of_date, content, source, timestamp, timestamp),
            )
            count += 1
        return count

    def delete_story_cache(
        self,
@@ -1002,8 +1013,9 @@ class MarketStore:
        target_date: str,
        payload: dict[str, Any],
        source: str = "local",
-   ) -> None:
+   ) -> int:
        timestamp = _utc_timestamp()
        count = 0
        with self._connect() as conn:
            conn.execute(
                """
@@ -1017,6 +1029,8 @@ class MarketStore:
                """,
                (symbol, target_date, _json_dumps(payload), source, timestamp, timestamp),
            )
            count += 1
        return count

    def delete_similar_day_cache(
        self,
@@ -1,15 +1,14 @@
# -*- coding: utf-8 -*-
-"""Gateway Server - Entry point for Gateway subprocess.
+"""Gateway Server - Entry point for the managed Gateway subprocess.

-This module is launched as a subprocess by the Control Plane (FastAPI)
-to run the Data Plane (Gateway + Pipeline).
+This module is launched by `runtime_service` when the runtime API is used to
+spawn a run-scoped Gateway process.
"""

import argparse
import asyncio
import json
import logging
import os
import sys
from contextlib import AsyncExitStack
from pathlib import Path
@@ -19,22 +18,13 @@ from dotenv import load_dotenv
# Load environment variables
load_dotenv()

-from backend.agents.prompt_loader import get_prompt_loader
-from backend.core.pipeline import TradingPipeline
-from backend.core.pipeline_runner import create_agents
-from backend.core.scheduler import BacktestScheduler, Scheduler
+from backend.core.pipeline_runner import build_gateway_runtime_bundle
from backend.runtime.manager import (
    TradingRuntimeManager,
    set_global_runtime_manager,
    clear_global_runtime_manager,
)
-from backend.services.gateway import Gateway
-from backend.services.market import MarketService
-from backend.services.storage import StorageService
-from backend.utils.settlement import SettlementCoordinator

logger = logging.getLogger(__name__)
-_prompt_loader = get_prompt_loader()


INFO_LOGGER_PREFIXES = (
@@ -110,153 +100,24 @@ async def run_gateway(
    port: int
):
    """Run Gateway with Pipeline."""

-   # Extract config
-   tickers = bootstrap.get("tickers", ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "META", "TSLA", "AMD", "NFLX", "AVGO", "PLTR", "COIN"])
-   initial_cash = float(bootstrap.get("initial_cash", 100000.0))
-   margin_requirement = float(bootstrap.get("margin_requirement", 0.0))
-   max_comm_cycles = int(bootstrap.get("max_comm_cycles", 2))
-   schedule_mode = bootstrap.get("schedule_mode", "daily")
-   trigger_time = bootstrap.get("trigger_time", "09:30")
-   interval_minutes = int(bootstrap.get("interval_minutes", 60))
-   heartbeat_interval = int(bootstrap.get("heartbeat_interval", 0))  # 0 = disabled
-   mode = bootstrap.get("mode", "live")
-   start_date = bootstrap.get("start_date")
-   end_date = bootstrap.get("end_date")
-   enable_memory = bootstrap.get("enable_memory", False)
    poll_interval = int(bootstrap.get("poll_interval", 10))

-   is_backtest = mode == "backtest"

    logger.info(f"[Gateway Server] Starting run {run_id} on port {port}")

-   # Create runtime manager
-   runtime_manager = TradingRuntimeManager(
-       config_name=run_id,
-       run_dir=run_dir,
-       bootstrap=bootstrap,
-   )
-   runtime_manager.prepare_run()
-   set_global_runtime_manager(runtime_manager)

    try:
-       async with AsyncExitStack() as stack:
-           # Create services
-           market_service = MarketService(
-               tickers=tickers,
-               poll_interval=poll_interval,
-               backtest_mode=is_backtest,
-               api_key=os.getenv("FINNHUB_API_KEY") if not is_backtest else None,
-               backtest_start_date=start_date if is_backtest else None,
-               backtest_end_date=end_date if is_backtest else None,
-           )
-
-           storage_service = StorageService(
-               dashboard_dir=run_dir / "team_dashboard",
-               initial_cash=initial_cash,
-               config_name=run_id,
-           )
-
-           if not storage_service.files["summary"].exists():
-               storage_service.initialize_empty_dashboard()
-           else:
-               storage_service.update_leaderboard_model_info()
-
-           # Create agents
-           analysts, risk_manager, pm, long_term_memories = create_agents(
+       bundle = build_gateway_runtime_bundle(
            run_id=run_id,
            run_dir=run_dir,
-           initial_cash=initial_cash,
-           margin_requirement=margin_requirement,
-           enable_long_term_memory=enable_memory,
+           bootstrap=bootstrap,
+           poll_interval=poll_interval,
        )
+       set_global_runtime_manager(bundle.runtime_manager)

-       # Register agents
-       for agent in analysts + [risk_manager, pm]:
-           agent_id = getattr(agent, "agent_id", None) or getattr(agent, "name", None)
-           if agent_id:
-               runtime_manager.register_agent(agent_id)
-
-       # Load portfolio state
-       portfolio_state = storage_service.load_portfolio_state()
-       pm.load_portfolio_state(portfolio_state)
-
-       # Create settlement coordinator
-       settlement_coordinator = SettlementCoordinator(
-           storage=storage_service,
-           initial_capital=initial_cash,
-       )
-
-       # Create pipeline
-       pipeline = TradingPipeline(
-           analysts=analysts,
-           risk_manager=risk_manager,
-           portfolio_manager=pm,
-           settlement_coordinator=settlement_coordinator,
-           max_comm_cycles=max_comm_cycles,
-           runtime_manager=runtime_manager,
-       )
-
-       # Create scheduler
-       scheduler_callback = None
-       live_scheduler = None
-
-       if is_backtest:
-           backtest_scheduler = BacktestScheduler(
-               start_date=start_date,
-               end_date=end_date,
-               trading_calendar="NYSE",
-               delay_between_days=0.5,
-           )
-
-           async def scheduler_callback_fn(callback):
-               await backtest_scheduler.start(callback)
-
-           scheduler_callback = scheduler_callback_fn
-       else:
-           live_scheduler = Scheduler(
-               mode=schedule_mode,
-               trigger_time=trigger_time,
-               interval_minutes=interval_minutes,
-               heartbeat_interval=heartbeat_interval if heartbeat_interval > 0 else None,
-               config={"config_name": run_id},
-           )
-
-           async def scheduler_callback_fn(callback):
-               await live_scheduler.start(callback)
-
-           scheduler_callback = scheduler_callback_fn
-
-       # Enter long-term memory contexts
-       for memory in long_term_memories:
+       async with AsyncExitStack() as stack:
+           for memory in bundle.long_term_memories:
                await stack.enter_async_context(memory)

-           # Create Gateway
-           gateway = Gateway(
-               market_service=market_service,
-               storage_service=storage_service,
-               pipeline=pipeline,
-               scheduler_callback=scheduler_callback,
-               config={
-                   "mode": mode,
-                   "backtest_mode": is_backtest,
-                   "tickers": tickers,
-                   "config_name": run_id,
-                   "schedule_mode": schedule_mode,
-                   "interval_minutes": interval_minutes,
-                   "trigger_time": trigger_time,
-                   "heartbeat_interval": heartbeat_interval,
-                   "initial_cash": initial_cash,
-                   "margin_requirement": margin_requirement,
-                   "max_comm_cycles": max_comm_cycles,
-                   "enable_memory": enable_memory,
-               },
-               scheduler=live_scheduler,
-           )
-
            # Start Gateway (blocks until shutdown)
            logger.info(f"[Gateway Server] Gateway starting on port {port}")
-           await gateway.start(host="0.0.0.0", port=port)
+           await bundle.gateway.start(host="0.0.0.0", port=port)

    except asyncio.CancelledError:
        logger.info("[Gateway Server] Cancelled")
@@ -444,6 +444,16 @@ def create_model(
    """
    provider = canonicalize_model_provider(provider)

    # If provider is default OPENAI but model name looks like deepseek,
    # check if we should switch to DASHSCOPE.
    if provider == "OPENAI" and "deepseek" in model_name.lower() and os.getenv("DASHSCOPE_API_KEY"):
        provider = "DASHSCOPE"

    # Intelligent routing: if it's a DeepSeek model and we have DashScope credentials,
    # prefer using DashScopeChatModel over OpenAIChatModel.
    if provider == "DEEPSEEK" and os.getenv("DASHSCOPE_API_KEY"):
        provider = "DASHSCOPE"

    model_class = PROVIDER_MODEL_MAP.get(provider)
    if model_class is None:
        raise ValueError(f"Unsupported provider: {provider}")
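The two redirects run before the provider-to-class lookup, so a DeepSeek model name ends up on DashScope whenever credentials are present. A condensed restatement of the decision, with the environment lookup replaced by an explicit flag for testability:

def route_provider(provider: str, model_name: str, has_dashscope_key: bool) -> str:
    # Mirrors the two redirects above in order.
    if provider == "OPENAI" and "deepseek" in model_name.lower() and has_dashscope_key:
        return "DASHSCOPE"
    if provider == "DEEPSEEK" and has_dashscope_key:
        return "DASHSCOPE"
    return provider

assert route_provider("OPENAI", "deepseek-chat", True) == "DASHSCOPE"
assert route_provider("OPENAI", "gpt-4o", True) == "OPENAI"
assert route_provider("DEEPSEEK", "deepseek-chat", False) == "DEEPSEEK"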
596
backend/main.py
@@ -1,596 +0,0 @@
# -*- coding: utf-8 -*-
"""
Main Entry Point
Supports: backtest, live modes
"""
import argparse
import asyncio
import logging
import os
from contextlib import AsyncExitStack
from pathlib import Path
import loguru

from dotenv import load_dotenv

from backend.agents import EvoAgent
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skills_manager import SkillsManager
from backend.agents.toolkit_factory import create_agent_toolkit, load_agent_profiles
from backend.agents.prompt_loader import get_prompt_loader
# WorkspaceManager is RunWorkspaceManager - provides run-scoped asset management
# All runtime state lives under runs/<run_id>/
from backend.agents.workspace_manager import WorkspaceManager
from backend.config.bootstrap_config import resolve_runtime_config
from backend.config.constants import ANALYST_TYPES
from backend.core.pipeline import TradingPipeline
from backend.core.scheduler import BacktestScheduler, Scheduler
from backend.llm.models import get_agent_formatter, get_agent_model
from backend.api.runtime import unregister_runtime_manager
from backend.runtime.manager import (
    TradingRuntimeManager,
    set_global_runtime_manager,
    clear_global_runtime_manager,
)
from backend.gateway_server import configure_gateway_logging
from backend.services.gateway import Gateway
from backend.services.market import MarketService
from backend.services.storage import StorageService
from backend.utils.settlement import SettlementCoordinator

load_dotenv()
logger = logging.getLogger(__name__)
loguru.logger.disable("flowllm")
loguru.logger.disable("reme_ai")
configure_gateway_logging(verbose=os.getenv("LOG_LEVEL", "").upper() == "DEBUG")
_prompt_loader = get_prompt_loader()


def _get_run_dir(config_name: str) -> Path:
    """Return the canonical run-scoped directory for a config.

    This is the authoritative path for runtime state under runs/<run_id>/.
    All runtime assets, state, and exports are scoped to this directory.
    """
    project_root = Path(__file__).resolve().parents[1]
    # Use RunWorkspaceManager for run-scoped path resolution
    return WorkspaceManager(project_root=project_root).get_run_dir(config_name)


def _resolve_runtime_config(args) -> dict:
    """Merge env defaults with run-scoped bootstrap config."""
    project_root = Path(__file__).resolve().parents[1]
    return resolve_runtime_config(
        project_root=project_root,
        config_name=args.config_name,
        enable_memory=args.enable_memory,
        schedule_mode=args.schedule_mode,
        interval_minutes=args.interval_minutes,
        trigger_time=args.trigger_time,
    )


def create_long_term_memory(agent_name: str, config_name: str):
    """
    Create ReMeTaskLongTermMemory for an agent

    Requires DASHSCOPE_API_KEY env var
    """
    from agentscope.memory import ReMeTaskLongTermMemory
    from agentscope.model import DashScopeChatModel
    from agentscope.embedding import DashScopeTextEmbedding

    api_key = os.getenv("MEMORY_API_KEY")
    if not api_key:
        logger.warning("MEMORY_API_KEY not set, long-term memory disabled")
        return None

    memory_dir = str(_get_run_dir(config_name) / "memory")

    return ReMeTaskLongTermMemory(
        agent_name=agent_name,
        user_name=agent_name,
        model=DashScopeChatModel(
            model_name=os.getenv("MEMORY_MODEL_NAME", "qwen3-max"),
            api_key=api_key,
            stream=False,
        ),
        embedding_model=DashScopeTextEmbedding(
            model_name=os.getenv(
                "MEMORY_EMBEDDING_MODEL",
                "text-embedding-v4",
            ),
            api_key=api_key,
            dimensions=1024,
        ),
        **{
            "vector_store.default.backend": "local",
            "vector_store.default.params.store_dir": memory_dir,
        },
    )


def _resolve_evo_agent_ids() -> set[str]:
    """Return agent ids selected to use EvoAgent.

    By default, all supported roles use EvoAgent.
    EVO_AGENT_IDS can be used to limit to specific roles (legacy behavior).
    Set EVO_AGENT_LEGACY=1 to disable EvoAgent entirely.

    Supported roles:
    - analyst roles (fundamentals, technical, sentiment, valuation)
    - risk_manager
    - portfolio_manager

    Example:
        EVO_AGENT_IDS=fundamentals_analyst,risk_manager,portfolio_manager
    """
    from backend.config.constants import ANALYST_TYPES

    all_supported = set(ANALYST_TYPES) | {"risk_manager", "portfolio_manager"}

    raw = os.getenv("EVO_AGENT_IDS", "")
    if not raw.strip():
        # Default: all supported roles use EvoAgent
        return all_supported

    if raw.strip().lower() in ("legacy", "old", "none"):
        return set()

    requested = {
        item.strip()
        for item in raw.split(",")
        if item.strip()
    }
    return {
        agent_id
        for agent_id in requested
        if agent_id in ANALYST_TYPES or agent_id in {"risk_manager", "portfolio_manager"}
    }


def _create_analyst_agent(
    *,
    analyst_type: str,
    config_name: str,
    model,
    formatter,
    skills_manager: SkillsManager,
    active_skill_map: dict[str, list[Path]],
    long_term_memory=None,
):
    """Create one analyst agent, optionally using EvoAgent."""
    active_skill_dirs = active_skill_map.get(analyst_type, [])
    toolkit = create_agent_toolkit(
        analyst_type,
        config_name,
        active_skill_dirs=active_skill_dirs,
    )

    workspace_dir = skills_manager.get_agent_asset_dir(config_name, analyst_type)
    agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
    agent = EvoAgent(
        agent_id=analyst_type,
        config_name=config_name,
        workspace_dir=workspace_dir,
        model=model,
        formatter=formatter,
        skills_manager=skills_manager,
        prompt_files=agent_config.prompt_files,
        long_term_memory=long_term_memory,
    )
    agent.toolkit = toolkit
    setattr(agent, "run_id", config_name)
    # Keep workspace_id for backward compatibility
    setattr(agent, "workspace_id", config_name)
    return agent


def _create_risk_manager_agent(
    *,
    config_name: str,
    model,
    formatter,
    skills_manager: SkillsManager,
    active_skill_map: dict[str, list[Path]],
    long_term_memory=None,
):
    """Create the risk manager, optionally using EvoAgent."""
    active_skill_dirs = active_skill_map.get("risk_manager", [])
    toolkit = create_agent_toolkit(
        "risk_manager",
        config_name,
        active_skill_dirs=active_skill_dirs,
    )

    use_evo_agent = "risk_manager" in _resolve_evo_agent_ids()

    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(config_name, "risk_manager")
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id="risk_manager",
            config_name=config_name,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            long_term_memory=long_term_memory,
        )
        agent.toolkit = toolkit
        setattr(agent, "run_id", config_name)
        # Keep workspace_id for backward compatibility
        setattr(agent, "workspace_id", config_name)
        return agent

    return RiskAgent(
        model=model,
        formatter=formatter,
        name="risk_manager",
        config={"config_name": config_name},
        long_term_memory=long_term_memory,
        toolkit=toolkit,
    )


def _create_portfolio_manager_agent(
    *,
    config_name: str,
    model,
    formatter,
    initial_cash: float,
    margin_requirement: float,
    skills_manager: SkillsManager,
    active_skill_map: dict[str, list[Path]],
    long_term_memory=None,
):
    """Create the portfolio manager, optionally using EvoAgent."""
    active_skill_dirs = active_skill_map.get("portfolio_manager", [])
    use_evo_agent = "portfolio_manager" in _resolve_evo_agent_ids()

    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(
            config_name,
            "portfolio_manager",
        )
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id="portfolio_manager",
            config_name=config_name,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            initial_cash=initial_cash,
            margin_requirement=margin_requirement,
            long_term_memory=long_term_memory,
        )
        agent.toolkit = create_agent_toolkit(
            "portfolio_manager",
            config_name,
            owner=agent,
            active_skill_dirs=active_skill_dirs,
        )
        setattr(agent, "run_id", config_name)
        # Keep workspace_id for backward compatibility
        setattr(agent, "workspace_id", config_name)
        return agent

    return PMAgent(
        name="portfolio_manager",
        model=model,
        formatter=formatter,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        config={"config_name": config_name},
        long_term_memory=long_term_memory,
        toolkit_factory=create_agent_toolkit,
        toolkit_factory_kwargs={
            "active_skill_dirs": active_skill_dirs,
        },
    )


def create_agents(
    config_name: str,
    initial_cash: float,
    margin_requirement: float,
    enable_long_term_memory: bool = False,
):
    """Create all agents for the system

    Returns:
        tuple: (analysts, risk_manager, portfolio_manager, long_term_memories)
        long_term_memories is a list of memory
    """
    analysts = []
    long_term_memories = []
    workspace_manager = WorkspaceManager()
    workspace_manager.initialize_default_assets(
        config_name=config_name,
        agent_ids=list(ANALYST_TYPES.keys())
        + ["risk_manager", "portfolio_manager"],
        analyst_personas=_prompt_loader.load_yaml_config("analyst", "personas"),
    )
    profiles = load_agent_profiles()
    skills_manager = SkillsManager()
    active_skill_map = skills_manager.prepare_active_skills(
        config_name=config_name,
        agent_defaults={
            agent_id: profile.get("skills", [])
            for agent_id, profile in profiles.items()
        },
    )

    for analyst_type in ANALYST_TYPES:
        model = get_agent_model(analyst_type)
        formatter = get_agent_formatter(analyst_type)

        long_term_memory = None
        if enable_long_term_memory:
            long_term_memory = create_long_term_memory(
                analyst_type,
                config_name,
            )
            if long_term_memory:
                long_term_memories.append(long_term_memory)

        analyst = _create_analyst_agent(
            analyst_type=analyst_type,
            config_name=config_name,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            active_skill_map=active_skill_map,
            long_term_memory=long_term_memory,
        )
        analysts.append(analyst)

    risk_long_term_memory = None
    if enable_long_term_memory:
        risk_long_term_memory = create_long_term_memory(
            "risk_manager",
            config_name,
        )
        if risk_long_term_memory:
            long_term_memories.append(risk_long_term_memory)

    risk_manager = _create_risk_manager_agent(
        config_name=config_name,
        model=get_agent_model("risk_manager"),
        formatter=get_agent_formatter("risk_manager"),
        skills_manager=skills_manager,
        active_skill_map=active_skill_map,
        long_term_memory=risk_long_term_memory,
    )

    pm_long_term_memory = None
    if enable_long_term_memory:
        pm_long_term_memory = create_long_term_memory(
            "portfolio_manager",
            config_name,
        )
        if pm_long_term_memory:
            long_term_memories.append(pm_long_term_memory)

    portfolio_manager = _create_portfolio_manager_agent(
        config_name=config_name,
        model=get_agent_model("portfolio_manager"),
        formatter=get_agent_formatter("portfolio_manager"),
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        skills_manager=skills_manager,
        active_skill_map=active_skill_map,
        long_term_memory=pm_long_term_memory,
    )

    return analysts, risk_manager, portfolio_manager, long_term_memories


async def run_with_gateway(args):
    """Run with WebSocket gateway"""
    is_backtest = args.mode == "backtest"
    runtime_config = _resolve_runtime_config(args)

    config_name = args.config_name
    tickers = runtime_config["tickers"]
    initial_cash = runtime_config["initial_cash"]
    margin_requirement = runtime_config["margin_requirement"]

    runtime_manager = TradingRuntimeManager(
        config_name=config_name,
        run_dir=_get_run_dir(config_name),
        bootstrap=runtime_config,
    )
    runtime_manager.prepare_run()
    set_global_runtime_manager(runtime_manager)

    # Create market service
    market_service = MarketService(
        tickers=tickers,
        poll_interval=args.poll_interval,
        backtest_mode=is_backtest,
        api_key=os.getenv("FINNHUB_API_KEY") if not is_backtest else None,
        backtest_start_date=args.start_date if is_backtest else None,
        backtest_end_date=args.end_date if is_backtest else None,
    )

    # Create storage service
    storage_service = StorageService(
        dashboard_dir=_get_run_dir(config_name) / "team_dashboard",
        initial_cash=initial_cash,
        config_name=config_name,
    )

    if not storage_service.files["summary"].exists():
        storage_service.initialize_empty_dashboard()
    else:
        storage_service.update_leaderboard_model_info()

    # Create agents and pipeline
    analysts, risk_manager, pm, long_term_memories = create_agents(
        config_name=config_name,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        enable_long_term_memory=runtime_config["enable_memory"],
    )
    for agent in analysts + [risk_manager, pm]:
        agent_id = getattr(agent, "agent_id", None) or getattr(agent, "name", None)
        if agent_id:
            runtime_manager.register_agent(agent_id)
    portfolio_state = storage_service.load_portfolio_state()
    pm.load_portfolio_state(portfolio_state)

    settlement_coordinator = SettlementCoordinator(
        storage=storage_service,
        initial_capital=initial_cash,
    )

    pipeline = TradingPipeline(
        analysts=analysts,
        risk_manager=risk_manager,
        portfolio_manager=pm,
        settlement_coordinator=settlement_coordinator,
        max_comm_cycles=runtime_config["max_comm_cycles"],
        runtime_manager=runtime_manager,
    )

    # Create scheduler callback
    scheduler_callback = None
    trading_dates = []
    live_scheduler = None

    if is_backtest:
        backtest_scheduler = BacktestScheduler(
            start_date=args.start_date,
            end_date=args.end_date,
            trading_calendar="NYSE",
            delay_between_days=0.5,
        )
        trading_dates = backtest_scheduler.get_trading_dates()

        async def scheduler_callback_fn(callback):
            await backtest_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn
    else:
        # Live mode: use daily or intraday scheduler with NYSE timezone
        live_scheduler = Scheduler(
            mode=runtime_config["schedule_mode"],
            trigger_time=runtime_config["trigger_time"],
            interval_minutes=runtime_config["interval_minutes"],
            config={"config_name": config_name},
        )

        async def scheduler_callback_fn(callback):
            await live_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn

    # Create gateway
    gateway = Gateway(
        market_service=market_service,
        storage_service=storage_service,
        pipeline=pipeline,
        scheduler_callback=scheduler_callback,
        config={
            "mode": args.mode,
            "backtest_mode": is_backtest,
            "tickers": tickers,
            "config_name": config_name,
            "schedule_mode": runtime_config["schedule_mode"],
            "interval_minutes": runtime_config["interval_minutes"],
            "trigger_time": runtime_config["trigger_time"],
            "initial_cash": initial_cash,
            "margin_requirement": margin_requirement,
            "max_comm_cycles": runtime_config["max_comm_cycles"],
            "enable_memory": runtime_config["enable_memory"],
        },
        scheduler=live_scheduler if not is_backtest else None,
    )

    if is_backtest:
        gateway.set_backtest_dates(trading_dates)

    # Start long-term memory contexts and run gateway
    async with AsyncExitStack() as stack:
        try:
            for memory in long_term_memories:
                await stack.enter_async_context(memory)
            await gateway.start(host=args.host, port=args.port)
        finally:
            # Persist long-term memories before cleanup
            for memory in long_term_memories:
                try:
                    if hasattr(memory, 'save') and callable(getattr(memory, 'save')):
                        await memory.save()
                except Exception as e:
                    logger.warning(f"Failed to persist memory: {e}")
            unregister_runtime_manager()
            clear_global_runtime_manager()


def build_arg_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the gateway runtime entrypoint."""
    parser = argparse.ArgumentParser(description="Trading System")
    parser.add_argument("--mode", choices=["live", "backtest"], default="live")
    parser.add_argument(
        "--config-name",
        default="default_run",
        help=(
            "Run label under runs/<config_name>; not a special root-level "
            "live/backtest/production directory."
        ),
    )
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8765)
    parser.add_argument(
        "--schedule-mode",
        choices=["daily", "intraday"],
        default="daily",
    )
    parser.add_argument("--trigger-time", default="09:30")  # NYSE market open
    parser.add_argument("--interval-minutes", type=int, default=60)
    parser.add_argument("--poll-interval", type=int, default=10)
    parser.add_argument("--start-date")
    parser.add_argument("--end-date")
    parser.add_argument(
        "--enable-memory",
        action="store_true",
        help="Enable ReMeTaskLongTermMemory for agents",
    )
    return parser


def main():
    """Main entry point"""
    parser = build_arg_parser()

    args = parser.parse_args()

    # Load config from env for logging
    runtime_config = _resolve_runtime_config(args)
    tickers = runtime_config["tickers"]
    initial_cash = runtime_config["initial_cash"]

    logger.info("=" * 60)
    logger.info(f"Mode: {args.mode}, Config: {args.config_name}")
    logger.info(f"Tickers: {tickers}")
    logger.info(f"Initial Cash: ${initial_cash:,.2f}")
    logger.info(
        "Long-term Memory: %s",
        "enabled" if runtime_config["enable_memory"] else "disabled",
    )
    if args.mode == "backtest":
        if not args.start_date or not args.end_date:
            parser.error(
                "--start-date and --end-date required for backtest mode",
            )
        logger.info(f"Backtest: {args.start_date} to {args.end_date}")
    logger.info("=" * 60)

    asyncio.run(run_with_gateway(args))


if __name__ == "__main__":
    main()
@@ -1,25 +1,27 @@
from __future__ import annotations

from dataclasses import dataclass, field
-from datetime import datetime, UTC
+from datetime import datetime, timezone
from typing import Any, Dict


@dataclass
class AgentRuntimeState:
    agent_id: str
    display_name: str | None = None
    status: str = "idle"
    last_session: str | None = None
-   last_updated: datetime = field(default_factory=lambda: datetime.now(UTC))
+   last_updated: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def update(self, status: str, session_key: str | None = None) -> None:
        self.status = status
        self.last_session = session_key
-       self.last_updated = datetime.now(UTC)
+       self.last_updated = datetime.now(timezone.utc)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "agent_id": self.agent_id,
            "display_name": self.display_name,
            "status": self.status,
            "last_session": self.last_session,
            "last_updated": self.last_updated.isoformat(),
@@ -2,7 +2,7 @@ from __future__ import annotations

import asyncio
import json
from datetime import datetime, UTC
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

@@ -93,7 +93,7 @@ class TradingRuntimeManager:

    def log_event(self, event: str, details: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        entry = {
            "timestamp": datetime.now(UTC).isoformat(),
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "event": event,
            "details": details or {},
            "session": self.current_session_key,
@@ -102,15 +102,25 @@ class TradingRuntimeManager:
        self._persist_snapshot()
        return entry

    def register_agent(self, agent_id: str) -> AgentRuntimeState:
        state = AgentRuntimeState(agent_id=agent_id)
    def register_agent(
        self,
        agent_id: str,
        display_name: Optional[str] = None,
    ) -> AgentRuntimeState:
        state = AgentRuntimeState(agent_id=agent_id, display_name=display_name)
        self.registry.register(agent_id, state)
        self._persist_snapshot()
        return state

    def unregister_agent(self, agent_id: str) -> Optional[AgentRuntimeState]:
        state = self.registry.unregister(agent_id)
        if state is not None:
            self._persist_snapshot()
        return state

    def register_pending_approval(self, approval_id: str, payload: Dict[str, Any]) -> None:
        payload.setdefault("status", "pending")
        payload.setdefault("created_at", datetime.now(UTC).isoformat())
        payload.setdefault("created_at", datetime.now(timezone.utc).isoformat())
        self.pending_approvals[approval_id] = payload
        self._persist_snapshot()

@@ -139,7 +149,7 @@ class TradingRuntimeManager:
        if not entry:
            return
        entry["status"] = status
        entry["resolved_at"] = datetime.now(UTC).isoformat()
        entry["resolved_at"] = datetime.now(timezone.utc).isoformat()
        entry["resolved_by"] = resolved_by
        self._persist_snapshot()
@@ -13,6 +13,9 @@ class RuntimeRegistry:
    def get(self, agent_id: str) -> Optional["AgentRuntimeState"]:
        return self._states.get(agent_id)

    def unregister(self, agent_id: str) -> Optional["AgentRuntimeState"]:
        return self._states.pop(agent_id, None)

    def list_agents(self) -> list[str]:
        return list(self._states.keys())
@@ -148,8 +148,9 @@ class Gateway:
            self.handle_client,
            host,
            port,
            ping_interval=30,
            ping_timeout=60,
            ping_interval=20,
            ping_timeout=120,
            max_size=10 * 1024 * 1024,  # 10MB
        )
        logger.info(f"WebSocket server ready: ws://{host}:{port}")

@@ -833,12 +834,18 @@ class Gateway:
        if not self.connected_clients:
            return

        message_json = json.dumps(message, ensure_ascii=False, default=str)
        # Offload potentially heavy JSON serialization to a worker thread
        message_json = await asyncio.to_thread(
            json.dumps, message, ensure_ascii=False, default=str
        )

        async with self.lock:
            # Filter only active clients to minimize unnecessary send attempts
            # In websockets v13+, we must check state.name == 'OPEN'
            active_clients = [c for c in self.connected_clients if c.state.name == 'OPEN']
            tasks = [
                self._send_to_client(client, message_json)
                for client in self.connected_clients.copy()
                for client in active_clients
            ]

        if tasks:
@@ -849,9 +856,14 @@ class Gateway:
        client: ServerConnection,
        message: str,
    ):
        if client.state.name != 'OPEN':
            async with self.lock:
                self.connected_clients.discard(client)
            return

        try:
            await client.send(message)
        except websockets.ConnectionClosed:
        except (websockets.ConnectionClosed, Exception):
            async with self.lock:
                self.connected_clients.discard(client)
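The broadcast change above serializes once, off the event loop. A minimal standalone sketch of the same pattern; the client objects and their `.state`/`.send` attributes follow the websockets API, everything else here is illustrative:

```python
import asyncio
import json

async def broadcast(clients, payload):
    # Serialize once in a worker thread so a large payload cannot stall
    # the event loop (and its ping/pong heartbeats) while json.dumps runs.
    message = await asyncio.to_thread(
        json.dumps, payload, ensure_ascii=False, default=str
    )
    open_clients = [c for c in clients if c.state.name == "OPEN"]
    # Fan out sends concurrently; collect exceptions instead of raising.
    await asyncio.gather(
        *(c.send(message) for c in open_clients),
        return_exceptions=True,
    )
```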
@@ -25,6 +25,13 @@ from backend.config.bootstrap_config import (
from backend.llm.models import get_agent_model_info


def _normalize_schedule_mode(value: Any) -> str:
    mode = str(value or "daily").strip().lower()
    if mode == "intraday":
        return "interval"
    return mode or "daily"


async def handle_reload_runtime_assets(gateway: Any) -> None:
    config_name = gateway.config.get("config_name", "default")
    runtime_config = resolve_runtime_config(
@@ -44,10 +51,10 @@ async def handle_reload_runtime_assets(gateway: Any) -> None:
async def handle_update_runtime_config(gateway: Any, websocket: Any, data: dict[str, Any]) -> None:
    updates: dict[str, Any] = {}

    schedule_mode = str(data.get("schedule_mode", "")).strip().lower()
    schedule_mode = _normalize_schedule_mode(data.get("schedule_mode", ""))
    if schedule_mode:
        if schedule_mode not in {"daily", "intraday"}:
            await websocket.send(json.dumps({"type": "error", "message": "schedule_mode must be 'daily' or 'intraday'."}, ensure_ascii=False))
        if schedule_mode not in {"daily", "interval"}:
            await websocket.send(json.dumps({"type": "error", "message": "schedule_mode must be 'daily' or 'interval'."}, ensure_ascii=False))
            return
        updates["schedule_mode"] = schedule_mode
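For reference, the normalizer above collapses the legacy `intraday` label into the canonical `interval` mode:

```python
# Behavior sketch for _normalize_schedule_mode (values are illustrative).
assert _normalize_schedule_mode("intraday") == "interval"  # legacy alias remapped
assert _normalize_schedule_mode(" Daily ") == "daily"      # trimmed and lowercased
assert _normalize_schedule_mode(None) == "daily"           # missing value falls back
```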
@@ -208,7 +208,7 @@ async def run_live_cycle(gateway: Any, date: str, tickers: list[str]) -> None:
    market_status = gateway.market_service.get_market_status()
    current_prices = gateway.market_service.get_all_prices()

    if schedule_mode == "intraday":
    if schedule_mode in {"interval", "intraday"}:
        execute_decisions = market_status.get("status") == "open"
        if execute_decisions:
            await gateway.state_sync.on_system_message("定时任务触发:当前处于交易时段,本轮将执行交易决策")
@@ -253,7 +253,8 @@ async def finalize_cycle(gateway: Any, date: str) -> None:

async def get_market_caps(gateway: Any, tickers: list[str], date: str) -> dict[str, float]:
    market_caps: dict[str, float] = {}
    for ticker in tickers:

    async def _get_one(ticker: str):
        try:
            market_cap = None
            response = await gateway._call_trading_service(
@@ -263,12 +264,21 @@ async def get_market_caps(gateway: Any, tickers: list[str], date: str) -> dict[s
            if response is not None:
                market_cap = response.get("market_cap")
            if market_cap is None:
                payload = trading_domain.get_market_cap_payload(ticker=ticker, end_date=date)
                payload = await asyncio.to_thread(
                    trading_domain.get_market_cap_payload,
                    ticker=ticker,
                    end_date=date,
                )
                market_cap = payload.get("market_cap")
            market_caps[ticker] = market_cap if market_cap else 1e9
            return ticker, (market_cap if market_cap else 1e9)
        except Exception as exc:
            logger.warning("Failed to get market cap for %s, using default 1e9: %s", ticker, exc)
            market_caps[ticker] = 1e9
            return ticker, 1e9

    tasks = [_get_one(ticker) for ticker in tickers]
    results = await asyncio.gather(*tasks)
    for ticker, mc in results:
        market_caps[ticker] = mc
    return market_caps
@@ -8,6 +8,13 @@ from typing import Any
from backend.data.provider_utils import normalize_symbol


def _normalize_schedule_mode(value: Any) -> str:
    mode = str(value or "daily").strip().lower()
    if mode == "intraday":
        return "interval"
    return mode or "daily"


def normalize_watchlist(raw_tickers: Any) -> list[str]:
    """Parse watchlist payloads from websocket messages."""
    if raw_tickers is None:
@@ -51,9 +58,11 @@ def apply_runtime_config(gateway: Any, runtime_config: dict[str, Any]) -> dict[s

    gateway.pipeline.max_comm_cycles = int(runtime_config["max_comm_cycles"])
    gateway.config["max_comm_cycles"] = gateway.pipeline.max_comm_cycles
    gateway.config["schedule_mode"] = runtime_config.get(
    gateway.config["schedule_mode"] = _normalize_schedule_mode(
        runtime_config.get(
            "schedule_mode",
            gateway.config.get("schedule_mode", "daily"),
        ),
    )
    gateway.config["interval_minutes"] = int(
        runtime_config.get(
@@ -529,7 +529,8 @@ async def handle_get_stock_technical_indicators(gateway: Any, websocket: Any, da

    try:
        end_date = datetime.now()
        start_date = end_date - timedelta(days=250)
        # Reduced from 250 to 150 calendar days (~100 trading days) to lower CPU/memory pressure; MA200 may lack history on this window
        start_date = end_date - timedelta(days=150)

        prices = None
        response = await gateway._call_trading_service(
@@ -544,7 +545,9 @@ async def handle_get_stock_technical_indicators(gateway: Any, websocket: Any, da
            prices = response.prices

        if prices is None:
            payload = trading_domain.get_prices_payload(
            # Offload domain logic to a worker thread
            payload = await asyncio.to_thread(
                trading_domain.get_prices_payload,
                ticker=ticker,
                start_date=start_date.strftime("%Y-%m-%d"),
                end_date=end_date.strftime("%Y-%m-%d"),
@@ -560,20 +563,21 @@ async def handle_get_stock_technical_indicators(gateway: Any, websocket: Any, da
            }, ensure_ascii=False))
            return

        def _calc():
            df = prices_to_df(prices)
            signal = gateway._technical_analyzer.analyze(ticker, df)

            df_sorted = df.sort_values("time").reset_index(drop=True)
            df_sorted["returns"] = df_sorted["close"].pct_change()
            vol_10 = float(df_sorted["returns"].tail(10).std() * (252**0.5) * 100) if len(df_sorted) >= 10 else None
            vol_20 = float(df_sorted["returns"].tail(20).std() * (252**0.5) * 100) if len(df_sorted) >= 20 else None
            vol_60 = float(df_sorted["returns"].tail(60).std() * (252**0.5) * 100) if len(df_sorted) >= 60 else None
            ma_distance = {}
            for ma_key in ["ma5", "ma10", "ma20", "ma50", "ma200"]:
                ma_value = getattr(signal, ma_key, None)
                ma_distance[ma_key] = ((signal.current_price - ma_value) / ma_value) * 100 if ma_value and ma_value > 0 else None
            v10 = float(df_sorted["returns"].tail(10).std() * (252**0.5) * 100) if len(df_sorted) >= 10 else None
            v20 = float(df_sorted["returns"].tail(20).std() * (252**0.5) * 100) if len(df_sorted) >= 20 else None
            v60 = float(df_sorted["returns"].tail(60).std() * (252**0.5) * 100) if len(df_sorted) >= 60 else None

            indicators = {
            ma_dist = {}
            for ma_key in ["ma5", "ma10", "ma20", "ma50", "ma200"]:
                ma_val = getattr(signal, ma_key, None)
                ma_dist[ma_key] = ((signal.current_price - ma_val) / ma_val) * 100 if ma_val and ma_val > 0 else None

            return {
                "ticker": ticker,
                "current_price": signal.current_price,
                "ma": {
@@ -582,7 +586,7 @@ async def handle_get_stock_technical_indicators(gateway: Any, websocket: Any, da
                    "ma20": signal.ma20,
                    "ma50": signal.ma50,
                    "ma200": signal.ma200,
                    "distance": ma_distance,
                    "distance": ma_dist,
                },
                "rsi": {
                    "rsi14": signal.rsi14,
@@ -599,9 +603,9 @@ async def handle_get_stock_technical_indicators(gateway: Any, websocket: Any, da
                    "lower": signal.bollinger_lower,
                },
                "volatility": {
                    "vol_10d": vol_10,
                    "vol_20d": vol_20,
                    "vol_60d": vol_60,
                    "vol_10d": v10,
                    "vol_20d": v20,
                    "vol_60d": v60,
                    "annualized": signal.annualized_volatility_pct,
                    "risk_level": signal.risk_level,
                },
@@ -609,11 +613,25 @@ async def handle_get_stock_technical_indicators(gateway: Any, websocket: Any, da
                "mean_reversion": signal.mean_reversion_signal,
            }

        await websocket.send(json.dumps({
        # Use a semaphore to prevent too many concurrent CPU-intensive calculations
        # from blocking the event loop heartbeats.
        if not hasattr(gateway, "_calc_sem"):
            gateway._calc_sem = asyncio.Semaphore(3)

        async with gateway._calc_sem:
            indicators = await asyncio.to_thread(_calc)

        # Also offload JSON serialization to a thread to avoid blocking the main loop
        msg = await asyncio.to_thread(json.dumps, {
            "type": "stock_technical_indicators_loaded",
            "ticker": ticker,
            "indicators": indicators,
        }, ensure_ascii=False, default=str))
        }, ensure_ascii=False, default=str)

        if websocket.state.name == 'OPEN':
            await websocket.send(msg)
        else:
            logger.warning("Websocket closed for %s, skipping indicator send", ticker)
    except Exception as exc:
        logger.exception("Error getting technical indicators for %s", ticker)
        await websocket.send(json.dumps({
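The handler above combines two guards: a semaphore bounding concurrent CPU-bound work, and `asyncio.to_thread` keeping that work off the event loop. Reduced to a minimal reusable sketch; the names are illustrative:

```python
import asyncio

_calc_sem = asyncio.Semaphore(3)  # at most three heavy calculations at once

async def run_bounded(calc_fn, *args):
    # The semaphore caps thread-pool pressure; to_thread keeps the event
    # loop free to answer pings while calc_fn runs.
    async with _calc_sem:
        return await asyncio.to_thread(calc_fn, *args)
```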
@@ -7,6 +7,7 @@ Handles reading/writing dashboard JSON files and portfolio state
import json
import logging
import os
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
@@ -950,11 +951,14 @@ class StorageService:

    def save_server_state(self, state: Dict[str, Any]):
        """
        Save server state to file

        Args:
            state: Server state dictionary
        Save server state to file with rate-limiting to avoid I/O storms.
        """
        now = time.time()
        # Ensure at least 2 seconds between physical disk writes
        if hasattr(self, "_last_save_time") and (now - self._last_save_time) < 2.0:
            return
        self._last_save_time = now

        state_to_save = {
            **state,
            "last_saved": datetime.now().isoformat(),
@@ -970,14 +974,17 @@ class StorageService:
        if "trades" in state_to_save:
            state_to_save["trades"] = state_to_save["trades"][:100]

        try:
            with open(self.server_state_file, "w", encoding="utf-8") as f:
                # Removed indent=2 to minimize file size and serialization overhead
                json.dump(
                    state_to_save,
                    f,
                    ensure_ascii=False,
                    indent=2,
                    default=str,
                )
        except Exception as e:
            logger.error(f"Failed to save server state: {e}")

        logger.debug(f"Server state saved to: {self.server_state_file}")
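One design note on the throttle above: a write arriving inside the 2-second window is dropped rather than deferred, which is acceptable here only because the next periodic save re-persists the full state. A generic sketch of the same guard, assuming that trade-off:

```python
import time

class WriteThrottle:
    """Allow at most one write per min_interval seconds; extras are skipped."""

    def __init__(self, min_interval: float = 2.0):
        self._min_interval = min_interval
        self._last = 0.0

    def allow(self) -> bool:
        now = time.time()
        if now - self._last < self._min_interval:
            return False  # skip; a later full save will persist fresh state
        self._last = now
        return True
```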
189
backend/skills/builtin/dynamic_team_management/SKILL.md
Normal file
@@ -0,0 +1,189 @@
---
name: dynamic_team_management
description: Dynamically manage the analyst agents on the team, including creating, cloning, and removing analysts, and listing the available analyst types.
version: 1.0.0
tools:
  - create_analyst
  - clone_analyst
  - remove_analyst
  - list_analyst_types
  - get_analyst_info
  - get_team_summary
---

# Dynamic Team Management

Use this skill when the composition of the analyst team needs to change. The portfolio manager can dynamically create new analysts, clone existing analysts for customization, or remove analysts that are no longer needed.

## 1) When to use

- The current team lacks analysis capability in a specific domain (e.g., options, crypto, ESG)
- Several analysts of the same type but with different configurations are needed (e.g., an "aggressive technical analyst" and a "conservative technical analyst")
- Extra analysis capacity is needed temporarily for unusual market conditions
- An analyst turns out to be misconfigured and must be removed and rebuilt
- A new analytical perspective is identified during team discussion

## 2) Required inputs

### Create an analyst (create_analyst)
- **agent_id**: unique identifier (e.g., "options_specialist_analyst")
- **analyst_type**: base type (e.g., "technical_analyst") or a custom identifier
- **Optional**: name, focus, description, soul_md, agents_md, model_name

### Clone an analyst (clone_analyst)
- **source_id**: source analyst ID (e.g., "technical_analyst")
- **new_id**: new analyst ID (e.g., "crypto_technical_analyst")
- **Optional**: name, focus_additions, description_override, model_name

### Remove an analyst (remove_analyst)
- **agent_id**: ID of the analyst to remove

## 3) Decision procedure

1. **Assess the team's capability gaps**
   - Review the list of currently active analysts
   - Identify missing analytical perspectives or domains

2. **Choose a creation strategy**
   - Based on an existing type: specify analyst_type and provide custom configuration
   - Fully custom: provide a complete persona definition
   - Clone and modify: copy an existing analyst and apply overrides

3. **Configure the analyst**
   - Set a unique agent_id
   - Define the display name and focus areas
   - Optional: provide custom SOUL.md content to pin down behavior precisely

4. **Verify the result**
   - Check the returned success status
   - Confirm the new analyst appears in the active list

## 4) Tool call policy

- **create_analyst**: create a brand-new analyst instance
  - A unique agent_id must be provided
  - When building on a predefined type, analyst_type must appear in the available type list; otherwise provide a complete custom configuration
  - If the tool call fails, check whether the agent_id already exists

- **clone_analyst**: create a variant of an existing analyst
  - Suited to sector-focused analysts (e.g., cloning crypto_technical from technical_analyst)
  - The new instance inherits the source configuration with the specified overrides applied

- **remove_analyst**: remove a dynamically created analyst
  - Only analysts created through this skill can be removed
  - Built-in analysts (fundamentals_analyst, etc.) cannot be removed

- **list_analyst_types**: list all available analyst types
  - Returns the predefined types plus runtime-registered types

- **get_analyst_info**: inspect the detailed configuration of a specific analyst

- **get_team_summary**: inspect the overall team composition

## 5) Output schema

### create_analyst / clone_analyst output
```json
{
  "success": true,
  "agent_id": "options_specialist_analyst",
  "message": "Created runtime analyst 'options_specialist_analyst' (technical_analyst).",
  "analyst_type": "technical_analyst"
}
```

### remove_analyst output
```json
{
  "success": true,
  "agent_id": "options_specialist_analyst",
  "message": "Removed runtime analyst 'options_specialist_analyst'."
}
```

### list_analyst_types output
```json
[
  {
    "type_id": "fundamentals_analyst",
    "name": "Fundamentals Analyst",
    "description": "...",
    "is_builtin": true,
    "source": "constants"
  }
]
```

## 6) Failure fallback

- **agent_id already exists**: an error is returned; pick a new agent_id, or use clone_analyst to derive a variant from the existing analyst
- **Unknown analyst_type**: use list_analyst_types to see the available types, or provide a complete custom persona
- **Creation failed**: check the system logs; likely causes include model misconfiguration or workspace permission issues
- **Removal failed**: confirm the analyst was created dynamically (built-in analysts cannot be removed)

## Key conventions

### Agent ID naming rule

For a newly created analyst to function correctly, **agent_id must end with `_analyst`**. This is the key convention the system uses to recognize the analyst role and assign the matching tools.

- ✅ **Correct**: `options_specialist_analyst`, `crypto_technical_analyst`
- ❌ **Wrong**: `options_specialist`, `crypto_expert`

If this convention is not followed, the analyst will not receive the analysis tool groups (fundamentals, technical, sentiment, valuation, etc.).
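A minimal sketch of a convention-following call, assuming the controller from `backend.tools.dynamic_team_tools` has been wired up by the pipeline; the ESG analyst here is hypothetical:

```python
from backend.tools.dynamic_team_tools import get_controller

controller = get_controller()
if controller is not None:
    result = controller.create_analyst(
        agent_id="esg_research_analyst",      # note the required _analyst suffix
        analyst_type="fundamentals_analyst",  # predefined base type
        name="ESG Research Analyst",
        focus=["ESG scoring", "regulatory risk"],
    )
    print(result["message"])  # e.g. "Created runtime analyst 'esg_research_analyst' ..."
```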
### Fully custom type vs. existing base type

**Based on an existing type** (recommended for quick creation):
- Use a predefined type such as `analyst_type: "technical_analyst"`
- The persona, SOUL.md, and other configuration can be overridden
- The tool group is selected automatically from `analyst_type`

**Fully custom type** (for complete customization):
- Set `analyst_type` to a custom identifier (e.g., `"custom"`) or any string
- A complete `persona` definition must be provided
- Providing `soul_md` to pin down behavior precisely is recommended
- **agent_id must still end with `_analyst`**

## Best practices

1. **Naming**: use a descriptive agent_id such as `industry_tech_analyst` rather than `analyst_01`; it **must end with `_analyst`**
2. **Versioning**: when cloning an analyst, include version information in new_id, e.g., `technical_v2_crypto_analyst`
3. **Documentation**: provide a detailed description when creating custom analysts so they stay understandable and maintainable
4. **Resource management**: periodically check team size with get_team_summary and remove analysts that are no longer needed

## Example scenarios

### Scenario 1: Add a crypto analyst
```
Create a new analyst focused on cryptocurrency technical analysis:
- agent_id: "crypto_technical_analyst"
- analyst_type: "technical_analyst"
- name: "Crypto Technical Analyst"
- focus: ["on-chain data analysis", "DeFi protocols", "crypto technical indicators"]
```

### Scenario 2: Clone and customize
```
Create a more aggressive variant based on technical_analyst:
- source_id: "technical_analyst"
- new_id: "aggressive_technical_analyst"
- name: "Aggressive Technical Analyst"
- focus_additions: ["high-volatility trading", "breakout strategies"]
- description_override: "Focused on high-risk, high-reward technical strategies..."
```

### Scenario 3: Create a fully custom type (options expert)
```
Create a fully custom options analyst (note the agent_id ending in _analyst):
- agent_id: "options_strategist_analyst"
- analyst_type: "custom"  # a non-predefined type
- name: "Options Strategy Analyst"
- focus: ["options pricing", "Greeks", "volatility surface"]
- soul_md: "# Role definition\nYou are an options strategy expert focused on..."
```

**Notes**:
- Even when `analyst_type` is "custom" (not among the predefined types), the system can create a fully functional analyst as long as a complete `persona` and `soul_md` are provided
- `agent_id` must end with `_analyst` to receive the analysis tools
- The model defaults to the global setting, or can be set via the `model_name` parameter
@@ -23,15 +23,17 @@ version: 1.0.0
## 3) Decision procedure

1. Aggregate and compare analyst signals; identify consensus and disagreement.
2. Map risk warnings to position caps and no-open conditions.
3. Generate candidate actions and quantities per ticker within cash and margin constraints.
4. Arbitrate conflicting signals conservatively: reduce position size, raise trigger thresholds, or switch to `hold`.
5. Record the final decision ticker by ticker and give a portfolio-level rationale.
2. First check whether the current team covers the expertise this round requires; if not, expand the team first instead of arbitrating directly.
3. Map risk warnings to position caps and no-open conditions.
4. Generate candidate actions and quantities per ticker within cash and margin constraints.
5. Arbitrate conflicting signals conservatively: reduce position size, raise trigger thresholds, add analysts, or switch to `hold`.
6. Record the final decision ticker by ticker and give a portfolio-level rationale.

## 4) Tool call policy

- The decision tool must be used to record the final `action/quantity` for every ticker.
- During discussion, if the team's capability proves insufficient, the team tools may be used to dynamically create or remove analysts (then continue the discussion).
- During discussion, if the team's capability is insufficient, the evidence chain is broken, or conflicting views cannot be adjudicated, the team tools must be used first to dynamically create or clone analysts (then continue the discussion).
- If "more specialist analysis is needed" has been established but the dynamic team tools were not called to fill the gap, no high-confidence final decision may be produced.
- If the risk tools flag a blocking item, obey the block; it must not be bypassed.
- On tool-call failure: retry once; if it still fails, output a structured "pending decisions list" with recommendations for manual handling.

@@ -46,5 +48,6 @@ version: 1.0.0
## 6) Failure fallback

- When analyst signals conflict sharply with the risk conclusion, default to a smaller position or `hold`.
- When the task clearly exceeds the team's capability boundary, expand the team first; if expansion fails, degrade to `hold` or a conditional decision draft.
- When constraint checks fail (insufficient cash/margin), reduce the quantity automatically; never output an unexecutable order.
- When the task requires a complete list, no ticker may be omitted; if no decision can be made, mark it `hold` explicitly and state the reason.
@@ -10,12 +10,15 @@ description: Integrate analyst views and risk feedback into explicit portfolio-level decisions
## Workflow

1. Read the analyst conclusions and risk warnings before acting.
2. Assess the current portfolio, cash, and margin constraints.
3. Use the decision tool to record one explicit decision per ticker.
4. After all decisions are recorded, summarize the overall portfolio-level rationale.
2. First check whether the current team is sufficient for this round's task; if not, expand the team before continuing.
3. Assess the current portfolio, cash, and margin constraints.
4. Use the decision tool to record one explicit decision per ticker.
5. After all decisions are recorded, summarize the overall portfolio-level rationale.

## Constraints

- Position sizes must respect cash and margin limits.
- When analyst confidence and risk signals disagree, prefer the smaller position.
- When the task exceeds the team's capability boundary, prefer using the dynamic team tools to create or clone analysts.
- Once a coverage gap has been identified, do not skip the expansion step and jump to a high-confidence conclusion.
- When the task requires a complete decision list, leave no ticker undecided.
@@ -5,7 +5,7 @@
from fastapi.testclient import TestClient

from backend.apps.agent_service import create_app
from backend.api import agents as agents_module
from backend.api import runs as runs_module


def test_agent_service_routes_include_control_plane_endpoints(tmp_path):
@@ -73,10 +73,10 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
    def load_agent_file(self, config_name, agent_id, filename):
        return f"{config_name}:{agent_id}:{filename}"

    monkeypatch.setattr(agents_module, "load_agent_profiles", lambda: {"portfolio_manager": {"skills": ["demo_skill"]}})
    monkeypatch.setattr(agents_module, "get_agent_model_info", lambda agent_id: ("deepseek-v3.2", "DASHSCOPE"))
    monkeypatch.setattr(runs_module, "load_agent_profiles", lambda: {"portfolio_manager": {"skills": ["demo_skill"]}})
    monkeypatch.setattr(runs_module, "get_agent_model_info", lambda agent_id: ("deepseek-v3.2", "DASHSCOPE"))
    monkeypatch.setattr(
        agents_module,
        runs_module,
        "load_agent_workspace_config",
        lambda path: type(
            "Cfg",
@@ -91,20 +91,20 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
        )(),
    )
    monkeypatch.setattr(
        agents_module,
        runs_module,
        "get_bootstrap_config_for_run",
        lambda project_root, config_name: type("Bootstrap", (), {"agent_override": lambda self, agent_id: {}})(),
    )

    app = create_app(project_root=tmp_path)
    app.dependency_overrides[agents_module.get_skills_manager] = lambda: _FakeSkillsManager()
    app.dependency_overrides[agents_module.get_workspace_manager] = lambda: _FakeWorkspaceManager()
    app.dependency_overrides[runs_module.get_skills_manager] = lambda: _FakeSkillsManager()
    app.dependency_overrides[runs_module.get_workspace_manager] = lambda: _FakeWorkspaceManager()

    with TestClient(app) as client:
        profile = client.get("/api/workspaces/demo/agents/portfolio_manager/profile")
        skills = client.get("/api/workspaces/demo/agents/portfolio_manager/skills")
        detail = client.get("/api/workspaces/demo/agents/portfolio_manager/skills/demo_skill")
        workspace_file = client.get("/api/workspaces/demo/agents/portfolio_manager/files/MEMORY.md")
        profile = client.get("/api/runs/demo/agents/portfolio_manager/profile")
        skills = client.get("/api/runs/demo/agents/portfolio_manager/skills")
        detail = client.get("/api/runs/demo/agents/portfolio_manager/skills/demo_skill")
        workspace_file = client.get("/api/runs/demo/agents/portfolio_manager/files/MEMORY.md")

    assert profile.status_code == 200
    assert profile.json()["profile"]["model_name"] == "deepseek-v3.2"
@@ -118,4 +118,3 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
    assert workspace_file.status_code == 200
    assert workspace_file.json()["content"] == "demo:portfolio_manager:MEMORY.md"
    assert workspace_file.json()["scope_type"] == "runtime_run"
    assert "runs/<run_id>" in workspace_file.json()["scope_note"]
@@ -6,14 +6,14 @@ from pathlib import Path


def test_main_resolve_evo_agent_ids_filters_unsupported_roles(monkeypatch):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    monkeypatch.setenv(
        "EVO_AGENT_IDS",
        "fundamentals_analyst,portfolio_manager,unknown,technical_analyst",
    )

    resolved = main_module._resolve_evo_agent_ids()
    resolved = runner_module._resolve_evo_agent_ids()

    assert resolved == {"fundamentals_analyst", "portfolio_manager", "technical_analyst"}

@@ -29,7 +29,7 @@ def test_pipeline_runner_resolve_evo_agent_ids_keeps_supported_roles(monkeypatch


def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    created = {}

@@ -49,12 +49,12 @@ def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):
        self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "fundamentals_analyst")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "toolkit")
    monkeypatch.setattr(runner_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(runner_module, "create_agent_toolkit", lambda *args, **kwargs: "toolkit")

    agent = main_module._create_analyst_agent(
    agent = runner_module._create_analyst_agent(
        analyst_type="fundamentals_analyst",
        config_name="demo",
        run_id="demo",
        model="model",
        formatter="formatter",
        skills_manager=DummySkillsManager(),
@@ -71,7 +71,7 @@ def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):


def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    created = {}

@@ -91,11 +91,11 @@ def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):
        self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "risk_manager")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "risk-toolkit")
    monkeypatch.setattr(runner_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(runner_module, "create_agent_toolkit", lambda *args, **kwargs: "risk-toolkit")

    agent = main_module._create_risk_manager_agent(
        config_name="demo",
    agent = runner_module._create_risk_manager_agent(
        run_id="demo",
        model="model",
        formatter="formatter",
        skills_manager=DummySkillsManager(),
@@ -112,7 +112,7 @@ def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):


def test_main_create_portfolio_manager_can_build_evo_agent(monkeypatch, tmp_path):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    created = {}

@@ -132,15 +132,15 @@ def test_main_create_portfolio_manager_can_build_evo_agent(monkeypatch, tmp_path
        self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "portfolio_manager")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(runner_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(
        main_module,
        runner_module,
        "create_agent_toolkit",
        lambda *args, **kwargs: "pm-toolkit",
    )

    agent = main_module._create_portfolio_manager_agent(
        config_name="demo",
    agent = runner_module._create_portfolio_manager_agent(
        run_id="demo",
        model="model",
        formatter="formatter",
        initial_cash=12345.0,
@@ -305,80 +305,15 @@ def test_pipeline_create_runtime_analyst_uses_evo_agent_when_enabled(monkeypatch
    assert created.get("config_name") == "demo"


def test_pipeline_create_runtime_analyst_uses_legacy_when_not_in_evo_ids(monkeypatch, tmp_path):
    """Test that _create_runtime_analyst creates legacy AnalystAgent when not in EVO_AGENT_IDS."""
    from backend.core import pipeline as pipeline_module

    created = {}

    class DummyEvoAgent:
        name = "test_analyst"

        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    class DummyAnalystAgent:
        name = "test_analyst"

        def __init__(self, **kwargs):
            created.update(kwargs)
            self.toolkit = None

    # EVO_AGENT_IDS does not include fundamentals_analyst
    monkeypatch.setenv("EVO_AGENT_IDS", "technical_analyst")
    monkeypatch.setattr(pipeline_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(pipeline_module, "AnalystAgent", DummyAnalystAgent)
    monkeypatch.setattr(
        pipeline_module,
        "create_agent_toolkit",
        lambda *args, **kwargs: "toolkit",
    )
    monkeypatch.setattr(
        pipeline_module,
        "get_agent_model",
        lambda x: "model",
    )
    monkeypatch.setattr(
        pipeline_module,
        "get_agent_formatter",
        lambda x: "formatter",
    )

    # Create a mock pipeline instance
    class MockPM:
        def __init__(self):
            self.config = {"config_name": "demo"}

    pipeline = pipeline_module.TradingPipeline(
        analysts=[],
        risk_manager=None,
        portfolio_manager=MockPM(),
    )

    # Mock workspace_manager methods
    monkeypatch.setattr(
        pipeline_module.WorkspaceManager,
        "ensure_agent_assets",
        lambda *args, **kwargs: None,
    )

    result = pipeline._create_runtime_analyst("test_analyst", "fundamentals_analyst")

    assert "Created runtime analyst" in result
    # Should use legacy AnalystAgent
    assert created.get("analyst_type") == "fundamentals_analyst"


def test_main_resolve_evo_agent_ids_returns_all_by_default(monkeypatch):
    """Test that _resolve_evo_agent_ids returns all supported roles by default."""
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module
    from backend.config.constants import ANALYST_TYPES

    # Unset EVO_AGENT_IDS to test default behavior
    monkeypatch.delenv("EVO_AGENT_IDS", raising=False)

    resolved = main_module._resolve_evo_agent_ids()
    resolved = runner_module._resolve_evo_agent_ids()

    expected = set(ANALYST_TYPES) | {"risk_manager", "portfolio_manager"}
    assert resolved == expected
@@ -427,11 +362,3 @@ def test_evo_agent_supports_long_term_memory(monkeypatch, tmp_path):
    assert created["long_term_memory"] is dummy_memory


def test_evo_agent_legacy_mode(monkeypatch):
    """Test that EVO_AGENT_IDS=legacy disables EvoAgent."""
    from backend import main as main_module

    monkeypatch.setenv("EVO_AGENT_IDS", "legacy")

    resolved = main_module._resolve_evo_agent_ids()
    assert resolved == set()
@@ -159,11 +159,11 @@ def test_apply_runtime_config_updates_gateway_state():
    )

    assert gateway.config["tickers"] == ["MSFT", "NVDA"]
    assert gateway.config["schedule_mode"] == "intraday"
    assert gateway.config["schedule_mode"] == "interval"
    assert gateway.storage.initial_cash == 150000.0
    assert result["runtime_config_applied"]["max_comm_cycles"] == 4
    assert gateway.scheduler.calls[-1] == {
        "mode": "intraday",
        "mode": "interval",
        "trigger_time": "10:30",
        "interval_minutes": 30,
    }

@@ -86,7 +86,7 @@ def test_runtime_service_get_runtime_config(monkeypatch, tmp_path):
        "---\n"
        "tickers:\n"
        " - AAPL\n"
        "schedule_mode: intraday\n"
        "schedule_mode: interval\n"
        "interval_minutes: 30\n"
        "trigger_time: '10:00'\n"
        "max_comm_cycles: 3\n"
@@ -102,7 +102,7 @@ def test_runtime_service_get_runtime_config(monkeypatch, tmp_path):
        "run_dir": str(run_dir),
        "bootstrap_values": {
            "tickers": ["AAPL"],
            "schedule_mode": "intraday",
            "schedule_mode": "interval",
            "interval_minutes": 30,
            "trigger_time": "10:00",
            "max_comm_cycles": 3,
@@ -123,7 +123,7 @@ def test_runtime_service_get_runtime_config(monkeypatch, tmp_path):
    assert response.status_code == 200
    payload = response.json()
    assert payload["run_id"] == "demo"
    assert payload["bootstrap"]["schedule_mode"] == "intraday"
    assert payload["bootstrap"]["schedule_mode"] == "interval"
    assert payload["resolved"]["interval_minutes"] == 30
    assert payload["resolved"]["enable_memory"] is True

@@ -190,7 +190,7 @@ def test_runtime_service_update_runtime_config_persists_bootstrap(monkeypatch, t
    response = client.put(
        "/api/runtime/config",
        json={
            "schedule_mode": "intraday",
            "schedule_mode": "interval",
            "interval_minutes": 15,
            "trigger_time": "10:15",
            "max_comm_cycles": 4,
@@ -199,7 +199,7 @@ def test_runtime_service_update_runtime_config_persists_bootstrap(monkeypatch, t

    assert response.status_code == 200
    payload = response.json()
    assert payload["bootstrap"]["schedule_mode"] == "intraday"
    assert payload["bootstrap"]["schedule_mode"] == "interval"
    assert payload["resolved"]["interval_minutes"] == 15
    assert "interval_minutes: 15" in (run_dir / "BOOTSTRAP.md").read_text(encoding="utf-8")

@@ -547,7 +547,7 @@ def test_start_runtime_restore_reuses_historical_run_id(monkeypatch, tmp_path):
        "run_dir": str(run_dir),
        "bootstrap_values": {
            "tickers": ["AAPL"],
            "schedule_mode": "intraday",
            "schedule_mode": "interval",
            "interval_minutes": 30,
            "trigger_time": "now",
            "max_comm_cycles": 2,
@@ -103,18 +103,28 @@ def _safe_float(value, default=0.0) -> float:


def safe(func):
    """Decorator to catch exceptions in tool functions."""
    """Decorator to catch exceptions in both sync and async tool functions."""

    if asyncio.iscoroutinefunction(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
        async def async_wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except Exception as e:
                error_msg = f"Error in {func.__name__}: {str(e)}"
                logger.error(f"{error_msg}\n{traceback.format_exc()}")
                return _to_text_response(f"[ERROR] {error_msg}")
        return async_wrapper
    else:
        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                error_msg = f"Error in {func.__name__}: {str(e)}"
                logger.error(f"{error_msg}\n{traceback.format_exc()}")
                return _to_text_response(f"[ERROR] {error_msg}")

    return wrapper
        return sync_wrapper
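A minimal sketch of how the updated `@safe` behaves for both kinds of tools; the failing tools here are hypothetical:

```python
@safe
def sync_tool() -> ToolResponse:
    raise RuntimeError("boom")  # caught by sync_wrapper -> "[ERROR] ..." response

@safe
async def async_tool() -> ToolResponse:
    raise RuntimeError("boom")  # caught by async_wrapper -> same error response
```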
def _fmt(val, fmt=".2f", suffix="") -> str:
@@ -141,7 +151,7 @@ def _resolved_date(current_date: Optional[str]) -> str:


@safe
def analyze_efficiency_ratios(
async def analyze_efficiency_ratios(
    tickers: Optional[List[str]] = None,
    current_date: Optional[str] = None,
) -> ToolResponse:
@@ -163,21 +173,26 @@ def analyze_efficiency_ratios(
    tickers = _parse_tickers(tickers)
    lines = [f"=== Efficiency Ratios Analysis ({current_date}) ===\n"]

    for ticker in tickers:
        metrics = get_financial_metrics(ticker=ticker, end_date=current_date)
    async def _fetch_one(ticker):
        try:
            metrics = await asyncio.to_thread(get_financial_metrics, ticker=ticker, end_date=current_date)
            if not metrics:
                lines.append(f"{ticker}: No data available\n")
                continue
                return f"{ticker}: No data available\n"

            m = metrics[0]
            lines.append(f"{ticker}:")
            lines.append(f"  Asset Turnover: {_fmt(m.asset_turnover)}")
            lines.append(f"  Inventory Turnover: {_fmt(m.inventory_turnover)}")
            lines.append(f"  Receivables Turnover: {_fmt(m.receivables_turnover)}")
            lines.append(
                f"  Working Capital Turnover: {_fmt(m.working_capital_turnover)}",
            )
            lines.append("")
            ticker_lines = [
                f"{ticker}:",
                f"  Asset Turnover: {_fmt(m.asset_turnover)}",
                f"  Inventory Turnover: {_fmt(m.inventory_turnover)}",
                f"  Receivables Turnover: {_fmt(m.receivables_turnover)}",
                f"  Working Capital Turnover: {_fmt(m.working_capital_turnover)}\n",
            ]
            return "\n".join(ticker_lines)
        except Exception as e:
            return f"{ticker}: Error - {str(e)}\n"

    results = await asyncio.gather(*[_fetch_one(t) for t in tickers])
    lines.extend(results)

    return _to_text_response("\n".join(lines))

@@ -310,7 +325,7 @@ def analyze_financial_health(


@safe
def analyze_valuation_ratios(
async def analyze_valuation_ratios(
    tickers: Optional[List[str]] = None,
    current_date: Optional[str] = None,
) -> ToolResponse:
@@ -332,24 +347,31 @@ def analyze_valuation_ratios(
    tickers = _parse_tickers(tickers)
    lines = [f"=== Valuation Ratios Analysis ({current_date}) ===\n"]

    for ticker in tickers:
        metrics = get_financial_metrics(ticker=ticker, end_date=current_date)
    async def _fetch_one(ticker):
        try:
            metrics = await asyncio.to_thread(get_financial_metrics, ticker=ticker, end_date=current_date)
            if not metrics:
                lines.append(f"{ticker}: No data available\n")
                continue
                return f"{ticker}: No data available\n"

            m = metrics[0]
            lines.append(f"{ticker}:")
            lines.append(f"  P/E Ratio: {_fmt(m.price_to_earnings_ratio)}")
            lines.append(f"  P/B Ratio: {_fmt(m.price_to_book_ratio)}")
            lines.append(f"  P/S Ratio: {_fmt(m.price_to_sales_ratio)}")
            lines.append("")
            ticker_lines = [
                f"{ticker}:",
                f"  P/E Ratio: {_fmt(m.price_to_earnings_ratio)}",
                f"  P/B Ratio: {_fmt(m.price_to_book_ratio)}",
                f"  P/S Ratio: {_fmt(m.price_to_sales_ratio)}\n",
            ]
            return "\n".join(ticker_lines)
        except Exception as e:
            return f"{ticker}: Error - {str(e)}\n"

    results = await asyncio.gather(*[_fetch_one(t) for t in tickers])
    lines.extend(results)

    return _to_text_response("\n".join(lines))


@safe
def get_financial_metrics_tool(
async def get_financial_metrics_tool(
    tickers: Optional[List[str]] = None,
    current_date: Optional[str] = None,
    period: str = "ttm",
@@ -374,35 +396,35 @@ def get_financial_metrics_tool(
        f"=== Comprehensive Financial Metrics ({current_date}, {period}) ===\n",
    ]

    for ticker in tickers:
        metrics = get_financial_metrics(
    async def _fetch_one(ticker):
        try:
            # Offload synchronous data fetching to thread to keep loop snappy
            metrics = await asyncio.to_thread(
                get_financial_metrics,
                ticker=ticker,
                end_date=current_date,
                period=period,
            )
            if not metrics:
                lines.append(f"{ticker}: No data available\n")
                continue
                return f"{ticker}: No data available\n"

            m = metrics[0]
            lines.append(f"{ticker}:")
            lines.append(f"  Market Cap: ${_fmt(m.market_cap, ',.0f')}")
            lines.append(
            ticker_lines = [
                f"{ticker}:",
                f"  Market Cap: ${_fmt(m.market_cap, ',.0f')}",
                f"  P/E: {_fmt(m.price_to_earnings_ratio)} | P/B: {_fmt(m.price_to_book_ratio)} | P/S: {_fmt(m.price_to_sales_ratio)}",
            )
            lines.append(
                f"  ROE: {_fmt(m.return_on_equity, '.1%')} | Net Margin: {_fmt(m.net_margin, '.1%')}",
            )
            lines.append(
                f"  Revenue Growth: {_fmt(m.revenue_growth, '.1%')} | Earnings Growth: {_fmt(m.earnings_growth, '.1%')}",
            )
            lines.append(
                f"  Current Ratio: {_fmt(m.current_ratio)} | D/E: {_fmt(m.debt_to_equity)}",
            )
            lines.append(
                f"  EPS: ${_fmt(m.earnings_per_share)} | FCF/Share: ${_fmt(m.free_cash_flow_per_share)}",
            )
            lines.append("")
                f"  EPS: ${_fmt(m.earnings_per_share)} | FCF/Share: ${_fmt(m.free_cash_flow_per_share)}\n",
            ]
            return "\n".join(ticker_lines)
        except Exception as e:
            return f"{ticker}: Error fetching data - {str(e)}\n"

    # Parallelize data retrieval for all tickers
    results = await asyncio.gather(*[_fetch_one(t) for t in tickers])
    lines.extend(results)

    return _to_text_response("\n".join(lines))
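All three conversions above follow the same shape: wrap the blocking fetch in a per-ticker coroutine, degrade failures to a per-ticker message, and fan out with `asyncio.gather`. The core of the pattern, with a hypothetical `fetch_fn`:

```python
import asyncio

async def fetch_all(tickers, fetch_fn):
    async def _one(ticker):
        try:
            # Blocking I/O runs in a worker thread; one slow or failing
            # ticker does not block or abort the others.
            return await asyncio.to_thread(fetch_fn, ticker)
        except Exception as e:
            return f"{ticker}: Error - {e}"

    return await asyncio.gather(*[_one(t) for t in tickers])
```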
564
backend/tools/dynamic_team_tools.py
Normal file
@@ -0,0 +1,564 @@
# -*- coding: utf-8 -*-
"""Dynamic Team Management Tools - Tools for PM to manage analyst team dynamically.

This module provides tools for the Portfolio Manager to:
- Create new analysts with custom configuration
- Clone existing analysts with variations
- Remove analysts from the team
- List available analyst types
- Get analyst information

These tools are registered with the PM's toolkit and enable dynamic team management
as described in the Dynamic Team Architecture.
"""
from __future__ import annotations

import json
from typing import Any, Dict, List, Optional, Callable

from agentscope.message import TextBlock
from agentscope.tool import ToolResponse

from backend.agents.dynamic_team_types import (
    AnalystPersona,
    AnalystConfig,
    CreateAnalystResult,
    AnalystTypeInfo,
)
from backend.config.constants import ANALYST_TYPES, AGENT_CONFIG


# Type alias for callbacks set by pipeline
CreateAnalystCallback = Callable[[str, str, Optional[AnalystConfig]], str]
RemoveAnalystCallback = Callable[[str], str]


def _to_tool_response(payload: Any) -> ToolResponse:
    if isinstance(payload, str):
        text = payload
    else:
        text = json.dumps(payload, ensure_ascii=False, indent=2, default=str)
    return ToolResponse(content=[TextBlock(type="text", text=text)])


class DynamicTeamController:
    """Controller for dynamic analyst team management.

    This class is instantiated by TradingPipeline and injected into the PM agent
    via set_team_controller(). It provides methods that the PM can call through
    tools to manage the analyst team dynamically.

    Attributes:
        create_callback: Callback to _create_runtime_analyst in pipeline
        remove_callback: Callback to _remove_runtime_analyst in pipeline
        get_analysts_callback: Callback to get current analysts list
        registered_types: Runtime-registered custom analyst types
    """

    def __init__(
        self,
        create_callback: CreateAnalystCallback,
        remove_callback: RemoveAnalystCallback,
        get_analysts_callback: Optional[Callable[[], List[Any]]] = None,
    ):
        """Initialize the controller with callbacks from pipeline.

        Args:
            create_callback: Function to create a runtime analyst
            remove_callback: Function to remove a runtime analyst
            get_analysts_callback: Optional function to get current analysts
        """
        self._create_callback = create_callback
        self._remove_callback = remove_callback
        self._get_analysts_callback = get_analysts_callback
        self._registered_types: Dict[str, AnalystPersona] = {}
        self._instance_configs: Dict[str, AnalystConfig] = {}

    def create_analyst(
        self,
        agent_id: str,
        analyst_type: str,
        name: Optional[str] = None,
        focus: Optional[List[str]] = None,
        description: Optional[str] = None,
        soul_md: Optional[str] = None,
        agents_md: Optional[str] = None,
        model_name: Optional[str] = None,
        preferred_tools: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Create a new analyst with optional custom configuration.

        This tool allows the Portfolio Manager to dynamically create new analysts
        during a trading session. The analyst can be based on a predefined type
        or fully customized with a unique persona.

        Args:
            agent_id: Unique identifier for the new analyst (e.g., "crypto_specialist_01")
            analyst_type: Base type (e.g., "technical_analyst") or custom identifier
            name: Display name for the analyst (overrides default)
            focus: List of focus areas (overrides default)
            description: Detailed description (overrides default)
            soul_md: Custom SOUL.md content for the analyst's workspace
            agents_md: Custom AGENTS.md content
            model_name: Override the default LLM model
            preferred_tools: List of preferred tool categories

        Returns:
            Dict with success status, message, and analyst info

        Example:
            >>> result = create_analyst(
            ...     agent_id="options_specialist",
            ...     analyst_type="technical_analyst",
            ...     name="期权策略分析师",
            ...     focus=["期权定价", "波动率交易"],
            ...     description="专注于期权市场分析和波动率交易策略...",
            ... )
        """
        # Build custom config if any customization is provided
        custom_config = None
        if any([name, focus, description, soul_md, agents_md, model_name, preferred_tools]):
            persona = None
            if name or focus or description:
                persona = AnalystPersona(
                    name=name or f"Custom {analyst_type}",
                    focus=focus or ["General Analysis"],
                    description=description or f"Custom analyst based on {analyst_type}",
                    preferred_tools=preferred_tools,
                )

            custom_config = AnalystConfig(
                persona=persona,
                analyst_type=analyst_type if analyst_type in ANALYST_TYPES else None,
                soul_md=soul_md,
                agents_md=agents_md,
                model_name=model_name,
            )

        # Call the pipeline's create method
        result_message = self._create_callback(agent_id, analyst_type, custom_config)

        # Parse result
        success = result_message.startswith("Created")
        if success:
            self._instance_configs[agent_id] = custom_config if custom_config else AnalystConfig(
                analyst_type=analyst_type
            )

        return {
            "success": success,
            "agent_id": agent_id if success else None,
            "message": result_message,
            "analyst_type": analyst_type,
        }

    def clone_analyst(
        self,
        source_id: str,
        new_id: str,
        name: Optional[str] = None,
        focus_additions: Optional[List[str]] = None,
        description_override: Optional[str] = None,
        model_name: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Clone an existing analyst with optional modifications.

        Creates a new analyst by copying the configuration of an existing one
        and applying specified overrides. Useful for creating specialized
        variants (e.g., "crypto_technical" from "technical_analyst").

        Args:
            source_id: ID of the analyst to clone
            new_id: Unique identifier for the new analyst
            name: New display name (if different from source)
            focus_additions: Additional focus areas to add
            description_override: Completely new description
            model_name: Override the model from source

        Returns:
            Dict with success status, message, and new analyst info

        Example:
            >>> result = clone_analyst(
            ...     source_id="technical_analyst",
            ...     new_id="crypto_technical_01",
            ...     name="加密货币技术分析师",
            ...     focus_additions=["链上数据", "DeFi协议分析"],
            ... )
        """
        # Get source config if available
        source_config = self._instance_configs.get(source_id)

        # Determine base type and config
        if source_config:
            base_type = source_config.analyst_type or source_id
            base_persona = source_config.persona
        else:
            # Assume source_id is a known type
            base_type = source_id
            base_persona = None

        # Build new persona
        new_focus = list(base_persona.focus) if base_persona else []
        if focus_additions:
            new_focus.extend(focus_additions)

        new_name = name or (base_persona.name if base_persona else new_id)
        new_description = description_override or (base_persona.description if base_persona else "")

        # Create new config with parent reference
        new_config = AnalystConfig(
            persona=AnalystPersona(
                name=new_name,
                focus=new_focus,
                description=new_description,
                preferred_tools=base_persona.preferred_tools if base_persona else None,
            ),
            analyst_type=base_type if base_type in ANALYST_TYPES else None,
            soul_md=source_config.soul_md if source_config else None,
            agents_md=source_config.agents_md if source_config else None,
            model_name=model_name or (source_config.model_name if source_config else None),
            parent_id=source_id,
        )

        # Create the new analyst
        result_message = self._create_callback(new_id, base_type, new_config)

        success = result_message.startswith("Created")
        if success:
            self._instance_configs[new_id] = new_config

        return {
            "success": success,
            "agent_id": new_id if success else None,
            "parent_id": source_id,
            "message": result_message,
        }

    def remove_analyst(self, agent_id: str) -> Dict[str, Any]:
        """Remove a dynamically created analyst from the team.

        Args:
            agent_id: ID of the analyst to remove

        Returns:
            Dict with success status and message

        Example:
            >>> result = remove_analyst("options_specialist")
        """
        result_message = self._remove_callback(agent_id)
        success = result_message.startswith("Removed") or "not found" not in result_message.lower()

        if success and agent_id in self._instance_configs:
            del self._instance_configs[agent_id]

        return {
            "success": success,
            "agent_id": agent_id,
            "message": result_message,
        }

    def list_analyst_types(self) -> List[Dict[str, Any]]:
        """List all available analyst types.

        Returns a list of all available analyst types, including:
        - Built-in types from ANALYST_TYPES
        - Runtime registered custom types

        Returns:
            List of analyst type information dictionaries

        Example:
            >>> types = list_analyst_types()
            >>> print(types[0]["type_id"])  # "fundamentals_analyst"
        """
        result = []

        # Add built-in types
        for type_id, info in ANALYST_TYPES.items():
            result.append({
                "type_id": type_id,
                "name": info.get("display_name", type_id),
                "description": info.get("description", ""),
                "is_builtin": True,
                "source": "constants",
            })

        # Add runtime registered types
        for type_id, persona in self._registered_types.items():
            result.append({
                "type_id": type_id,
                "name": persona.name,
                "description": persona.description,
                "is_builtin": False,
                "source": "runtime",
            })

        return result

    def get_analyst_info(self, agent_id: str) -> Dict[str, Any]:
        """Get information about a specific analyst.

        Args:
            agent_id: ID of the analyst

        Returns:
            Dict with analyst configuration and status
        """
        config = self._instance_configs.get(agent_id)
        current_analysts = self._get_analysts_callback() if self._get_analysts_callback else []
        analyst_map = {
            (getattr(agent, "name", None) or getattr(agent, "agent_id", None)): agent
            for agent in current_analysts
        }
        if agent_id in analyst_map and not config:
            builtin_meta = AGENT_CONFIG.get(agent_id, {})
            return {
                "found": True,
                "agent_id": agent_id,
                "name": builtin_meta.get("name") or agent_id,
                "type": agent_id,
                "is_custom": False,
                "is_clone": False,
                "is_builtin": True,
                "message": f"Built-in analyst '{agent_id}' is active",
            }
        if not config:
            return {
                "found": False,
                "agent_id": agent_id,
                "message": f"No configuration found for '{agent_id}'",
            }

        return {
            "found": True,
            "agent_id": agent_id,
            "config": config.to_dict(),
            "is_custom": config.persona is not None,
            "is_clone": config.parent_id is not None,
            "parent_id": config.parent_id,
            "is_builtin": False,
        }

    def register_analyst_type(
        self,
        type_id: str,
        name: str,
        focus: List[str],
        description: str,
        preferred_tools: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Register a new analyst type for later creation.

        This allows defining reusable analyst personas that can be instantiated
        multiple times with different configurations.

        Args:
            type_id: Unique identifier for this type (e.g., "options_analyst")
            name: Display name
            focus: List of focus areas
            description: Detailed description
            preferred_tools: Optional list of preferred tool categories

        Returns:
            Dict with success status and type info

        Example:
            >>> result = register_analyst_type(
            ...     type_id="options_analyst",
            ...     name="期权分析师",
            ...     focus=["期权定价", "希腊字母分析"],
            ...     description="专注于期权策略和波动率分析",
            ... )
        """
        if type_id in self._registered_types or type_id in ANALYST_TYPES:
            return {
                "success": False,
                "type_id": type_id,
                "message": f"Type '{type_id}' already exists",
            }

        persona = AnalystPersona(
            name=name,
            focus=focus,
            description=description,
            preferred_tools=preferred_tools,
        )
        self._registered_types[type_id] = persona

        return {
            "success": True,
            "type_id": type_id,
            "persona": persona.to_dict(),
            "message": f"Registered new analyst type '{type_id}'",
        }

    def get_team_summary(self) -> Dict[str, Any]:
        """Get a summary of the current analyst team.

        Returns:
            Dict with team composition information
        """
        analysts = []
        current_analysts = self._get_analysts_callback() if self._get_analysts_callback else []
        instance_configs = self._instance_configs

        for agent in current_analysts:
            agent_id = getattr(agent, "name", None) or getattr(agent, "agent_id", None)
            if not agent_id:
                continue
            config = instance_configs.get(agent_id)
            builtin_meta = AGENT_CONFIG.get(agent_id, {})
            analysts.append({
                "agent_id": agent_id,
                "name": (
                    config.persona.name
                    if config and config.persona and config.persona.name
                    else builtin_meta.get("name") or agent_id
                ),
                "type": config.analyst_type if config else agent_id,
                "is_custom": bool(config and config.persona is not None),
                "is_clone": bool(config and config.parent_id is not None),
                "is_builtin": config is None,
            })

        return {
            "total_analysts": len(analysts),
            "custom_analysts": len([a for a in analysts if a["is_custom"]]),
            "cloned_analysts": len([a for a in analysts if a["is_clone"]]),
            "analysts": analysts,
            "registered_types": len(self._registered_types),
        }


# Global controller instance - set by pipeline
_controller_instance: Optional[DynamicTeamController] = None


def set_controller(controller: DynamicTeamController) -> None:
    """Set the global controller instance.

    Called by TradingPipeline when initializing the PM agent.
    """
    global _controller_instance
    _controller_instance = controller


def get_controller() -> Optional[DynamicTeamController]:
    """Get the global controller instance.

    Returns:
        DynamicTeamController instance or None if not set
    """
    return _controller_instance


# Tool functions that wrap the controller methods
# These are registered with the PM's toolkit

def create_analyst(
    agent_id: str,
    analyst_type: str,
    name: str = "",
    focus: str = "",
    description: str = "",
    soul_md: str = "",
    agents_md: str = "",
    model_name: str = "",
) -> ToolResponse:
    """Tool wrapper for create_analyst.

    Note: focus parameter accepts comma-separated string for tool compatibility.
    """
    controller = get_controller()
    if not controller:
        return _to_tool_response({"success": False, "error": "Dynamic team controller not available"})

    focus_list = [f.strip() for f in focus.split(",")] if focus else None
    return _to_tool_response(
        controller.create_analyst(
            agent_id=agent_id,
            analyst_type=analyst_type,
            name=name,
            focus=focus_list,
            description=description,
            soul_md=soul_md,
            agents_md=agents_md,
model_name=model_name,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def clone_analyst(
|
||||
source_id: str,
|
||||
new_id: str,
|
||||
name: str = "",
|
||||
focus_additions: str = "",
|
||||
description_override: str = "",
|
||||
model_name: str = "",
|
||||
) -> ToolResponse:
|
||||
"""Tool wrapper for clone_analyst.
|
||||
|
||||
Note: focus_additions accepts comma-separated string.
|
||||
"""
|
||||
controller = get_controller()
|
||||
if not controller:
|
||||
return _to_tool_response({"success": False, "error": "Dynamic team controller not available"})
|
||||
|
||||
additions_list = [f.strip() for f in focus_additions.split(",")] if focus_additions else None
|
||||
return _to_tool_response(
|
||||
controller.clone_analyst(
|
||||
source_id=source_id,
|
||||
new_id=new_id,
|
||||
name=name,
|
||||
focus_additions=additions_list,
|
||||
description_override=description_override,
|
||||
model_name=model_name,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def remove_analyst(agent_id: str) -> ToolResponse:
|
||||
"""Tool wrapper for remove_analyst."""
|
||||
controller = get_controller()
|
||||
if not controller:
|
||||
return _to_tool_response({"success": False, "error": "Dynamic team controller not available"})
|
||||
return _to_tool_response(controller.remove_analyst(agent_id))
|
||||
|
||||
|
||||
def list_analyst_types() -> ToolResponse:
|
||||
"""Tool wrapper for list_analyst_types."""
|
||||
controller = get_controller()
|
||||
if not controller:
|
||||
return _to_tool_response([])
|
||||
return _to_tool_response(controller.list_analyst_types())
|
||||
|
||||
|
||||
def get_analyst_info(agent_id: str) -> ToolResponse:
|
||||
"""Tool wrapper for get_analyst_info."""
|
||||
controller = get_controller()
|
||||
if not controller:
|
||||
return _to_tool_response({"found": False, "error": "Controller not available"})
|
||||
return _to_tool_response(controller.get_analyst_info(agent_id))
|
||||
|
||||
|
||||
def get_team_summary() -> ToolResponse:
|
||||
"""Tool wrapper for get_team_summary."""
|
||||
controller = get_controller()
|
||||
if not controller:
|
||||
return _to_tool_response({"error": "Controller not available"})
|
||||
return _to_tool_response(controller.get_team_summary())
|
||||
|
||||
|
||||
__all__ = [
|
||||
"DynamicTeamController",
|
||||
"set_controller",
|
||||
"get_controller",
|
||||
"create_analyst",
|
||||
"clone_analyst",
|
||||
"remove_analyst",
|
||||
"list_analyst_types",
|
||||
"get_analyst_info",
|
||||
"get_team_summary",
|
||||
]
|
||||
@@ -19,7 +19,6 @@
import json
import logging
import os
import warnings
from abc import ABC, abstractmethod
from typing import Any

@@ -71,7 +70,6 @@ class NoSandboxBackend(SandboxBackend):

    def __init__(self):
        self._module_cache = {}
        self._warning_shown = False

    def _get_script_name(self, function_name: str) -> str:
        """
@@ -96,19 +94,6 @@ class NoSandboxBackend(SandboxBackend):
    ) -> dict:
        """Import the module directly and execute the function."""

        # Show a security warning on first use
        if not self._warning_shown:
            warnings.warn(
                "\n" + "=" * 60 + "\n"
                "⚠️ [Security warning] Skills are running without a sandbox (SKILL_SANDBOX_MODE=none)\n"
                "   Skill scripts execute directly in the current process, with no isolation.\n"
                "   Recommendation: set SKILL_SANDBOX_MODE=docker in production.\n"
                "=" * 60,
                RuntimeWarning,
                stacklevel=2,
            )
            self._warning_shown = True

        logger.debug(f"[NoSandbox] Executing skill: {skill_name}.{function_name}")

        try:
@@ -345,13 +330,13 @@ class SkillSandbox:
        self._backend = self._create_backend()
        self._initialized = True

        logger.info(f"SkillSandbox initialized, mode: {self.mode}")
        logger.debug(f"SkillSandbox initialized, mode: {self.mode}")

    def _create_backend(self) -> SandboxBackend:
        """Create the matching backend for the configured mode."""

        if self.mode == "none":
            logger.info("Using no-sandbox mode (direct execution)")
            logger.debug("Using no-sandbox mode (direct execution)")
            return NoSandboxBackend()

        elif self.mode == "docker":
@@ -10,37 +10,12 @@ For the planned convergence work, see

## Contents

- [deploy/systemd/evotraders.service](./systemd/evotraders.service)
  - systemd unit for the long-running 大时代 gateway process
- [scripts/run_prod.sh](../scripts/run_prod.sh)
  - production launch script used by the systemd unit
- [deploy/nginx/bigtime.cillinn.com.conf](./nginx/bigtime.cillinn.com.conf)
  - HTTPS nginx config with WebSocket proxying
- [deploy/nginx/bigtime.cillinn.com.http.conf](./nginx/bigtime.cillinn.com.http.conf)
  - plain HTTP/static-site variant

## Deployment Topology Options

This directory documents two deployment topologies:

### 1. Compatibility Topology (backend.main) - CURRENT PRODUCTION DEFAULT

The checked-in production path uses the **compatibility gateway** (`backend.main`):

- nginx serves the built frontend from `/var/www/bigtime/current`
- public domain examples use `bigtime.cillinn.com`
- nginx proxies `/ws` to `127.0.0.1:8765`
- systemd runs `scripts/run_prod.sh`
- `scripts/run_prod.sh` starts `python3 -m backend.main` in live mode on `127.0.0.1:8765`

This is a **monolithic gateway** that embeds all services internally. It is the
current production default for simplicity but does not expose the split FastAPI
services directly.

**When to use**: Single-server deployments, simpler operational requirements,
backwards compatibility with existing monitoring.

### 2. Preferred Topology (Split Services) - RECOMMENDED FOR NEW DEPLOYMENTS
## Recommended Topology

The modern architecture exposes individual FastAPI services:

@@ -76,27 +51,9 @@ To deploy in split-service mode, you would:

## systemd

The current systemd unit:

- uses `WorkingDirectory=/root/code/evotraders`
- executes [scripts/run_prod.sh](../scripts/run_prod.sh)
- restarts automatically on failure

Enable and start:

```bash
sudo cp deploy/systemd/evotraders.service /etc/systemd/system/evotraders.service
sudo systemctl daemon-reload
sudo systemctl enable evotraders
sudo systemctl start evotraders
```

Check status and logs:

```bash
sudo systemctl status evotraders
journalctl -u evotraders -f
```
No maintained systemd unit is checked into the repository anymore. If deployment
work resumes, add units that mirror the split-service topology used in local
development.

## nginx

@@ -124,9 +81,9 @@ The checked-in TLS config expects Let's Encrypt assets at:
Before using the production scripts, ensure the runtime environment has:

- a usable Python environment
- backend dependencies installed from `requirements.txt`
- backend dependencies installed from the checked-in Python package metadata in `pyproject.toml`
- the package installed with `pip install -e .` or `uv pip install -e .`
- frontend dependencies installed with `npm ci`
- frontend dependencies installed with `npm install`
- repo dependencies installed
- required market/model API keys
- any desired `TICKERS` override
@@ -136,28 +93,8 @@ Recommended production install sequence:

```bash
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
pip install -e .
cd frontend && npm ci && npm run build && cd ..
```

The production script currently sets:

```bash
PYTHONPATH=/root/code/evotraders/.pydeps:.
TICKERS=${TICKERS:-AAPL,MSFT,GOOGL,AMZN,NVDA,META,TSLA,AMD,NFLX,AVGO,PLTR,COIN}
```

It then launches the current compatibility gateway/runtime process:

```bash
python3 -m backend.main \
    --mode live \
    --config-name production \
    --host 127.0.0.1 \
    --port 8765 \
    --trigger-time now \
    --poll-interval 15
cd frontend && npm install && npm run build && cd ..
```

## Skill Sandbox Configuration
@@ -172,7 +109,7 @@ pip install -e ".[docker-sandbox]"
docker info
```

Environment variables (set by `scripts/run_prod.sh` with defaults):
Example environment variables for a future deployment:

| Variable | Default | Description |
|----------|---------|-------------|
@@ -196,6 +133,5 @@ The checked-in deployment artifacts do not currently document or automate:
- frontend build/publish steps
- secret management

If you move production fully to split-service mode, update this directory so it
documents the new service topology explicitly instead of relying on the
gateway-only path.
If deployment returns to active development, rewrite this directory around the
same split-service topology used by `start-dev.sh`.
656
deploy/install-production.sh
Executable file
@@ -0,0 +1,656 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
NC='\033[0m'

NON_INTERACTIVE=false
AUTO_INSTALL_DEPS=""
AUTO_INSTALL_SYSTEMD=""
AUTO_START_SYSTEMD=""
AUTO_INSTALL_NGINX=""
AUTO_RELOAD_NGINX=""
AUTO_USE_TLS=""
AUTO_USE_DOCKER=""

log() {
  echo -e "${GREEN}[bigtime]${NC} $*"
}

warn() {
  echo -e "${YELLOW}[bigtime]${NC} $*"
}

fail() {
  echo -e "${RED}[bigtime]${NC} $*" >&2
  exit 1
}

ask() {
  local prompt="$1"
  local default="${2:-}"
  if ${NON_INTERACTIVE}; then
    printf '%s' "${default}"
    return
  fi
  local value
  if [[ -n "${default}" ]]; then
    read -r -p "${prompt} [${default}]: " value
    printf '%s' "${value:-$default}"
  else
    read -r -p "${prompt}: " value
    printf '%s' "${value}"
  fi
}

ask_required() {
  local prompt="$1"
  local default="${2:-}"
  local value=""
  while [[ -z "${value}" ]]; do
    value="$(ask "${prompt}" "${default}")"
    if [[ -z "${value}" ]]; then
      warn "This field cannot be empty; please enter a value."
    fi
  done
  printf '%s' "${value}"
}

validate_domain_like() {
  local value="$1"
  [[ -z "${value}" ]] && return 1
  [[ "${value}" =~ ^[A-Za-z0-9.-]+$ ]]
}

validate_file_parent_exists_or_rootable() {
  local value="$1"
  local parent
  parent="$(dirname "${value}")"
  [[ -d "${parent}" ]] || [[ "${parent}" == "/etc/bigtime" ]] || [[ "${parent}" == "/etc/nginx/conf.d" ]]
}

validate_numeric() {
  local value="$1"
  [[ "${value}" =~ ^[0-9]+([.][0-9]+)?$ ]]
}

confirm() {
  local prompt="$1"
  local default="${2:-Y}"
  local override="${3:-}"
  if [[ -n "${override}" ]]; then
    [[ "${override}" =~ ^[Yy]([Ee][Ss])?$|^true$|^1$ ]]
    return
  fi
  if ${NON_INTERACTIVE}; then
    [[ "${default}" == "Y" ]]
    return
  fi
  local suffix="[Y/n]"
  [[ "${default}" == "N" ]] && suffix="[y/N]"
  local value
  read -r -p "${prompt} ${suffix}: " value
  value="${value:-$default}"
  [[ "${value}" =~ ^[Yy]$ ]]
}

command_exists() {
  command -v "$1" >/dev/null 2>&1
}

detect_pkg_manager() {
  if command_exists apt-get; then
    echo "apt"
    return
  fi
  if command_exists dnf; then
    echo "dnf"
    return
  fi
  if command_exists yum; then
    echo "yum"
    return
  fi
  echo ""
}

install_packages() {
  local pkg_manager="$1"
  case "${pkg_manager}" in
    apt)
      sudo apt-get update
      sudo apt-get install -y python3 python3-venv python3-pip nginx curl git build-essential nodejs npm
      ;;
    dnf)
      sudo dnf install -y python3 python3-pip nginx curl git gcc-c++ make nodejs npm
      ;;
    yum)
      sudo yum install -y python3 python3-pip nginx curl git gcc-c++ make nodejs npm
      ;;
    *)
      warn "Unrecognized package manager; skipping dependency install. Please install python3, venv, pip, nginx, node, and npm manually."
      ;;
  esac
}

render_systemd_unit() {
  local service_name="$1"
  local app_module="$2"
  local port="$3"
  local workers="$4"
  local memory_max="$5"
  local unit_path="$6"

  sudo tee "${unit_path}" >/dev/null <<EOF
[Unit]
Description=BigTime ${service_name}
After=network.target

[Service]
Type=simple
User=${SERVICE_USER}
Group=${SERVICE_GROUP}
WorkingDirectory=${APP_DIR}
EnvironmentFile=${ENV_FILE}
ExecStart=${PYTHON_BIN} -m uvicorn ${app_module} --host 127.0.0.1 --port ${port} --workers ${workers} --log-level warning --no-access-log
Restart=always
RestartSec=3
TimeoutStopSec=30
KillMode=mixed
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=full
ProtectHome=false
LimitNOFILE=65535
TasksMax=4096
MemoryMax=${memory_max}

[Install]
WantedBy=multi-user.target
EOF
}

render_nginx_conf() {
  local target="$1"
  local use_tls="$2"
  if [[ "${use_tls}" == "yes" ]]; then
    sudo tee "${target}" >/dev/null <<EOF
server {
    listen 80;
    server_name ${DOMAIN};

    root ${APP_DIR}/frontend/dist;

    location /.well-known/acme-challenge/ {
        allow all;
    }

    location / {
        return 301 https://\$host\$request_uri;
    }
}

server {
    listen 443 ssl http2;
    server_name ${DOMAIN};

    root ${APP_DIR}/frontend/dist;
    index index.html;

    ssl_certificate ${SSL_CERT_PATH};
    ssl_certificate_key ${SSL_KEY_PATH};
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    location /ws {
        proxy_pass http://127.0.0.1:8765;
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/runtime/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/dynamic-team/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/trading/ {
        proxy_pass http://127.0.0.1:8001;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/news/ {
        proxy_pass http://127.0.0.1:8002;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/ {
        proxy_pass http://127.0.0.1:8000;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location / {
        try_files \$uri \$uri/ /index.html;
    }
}
EOF
  else
    sudo tee "${target}" >/dev/null <<EOF
server {
    listen 80;
    server_name ${DOMAIN};

    root ${APP_DIR}/frontend/dist;
    index index.html;

    location /.well-known/acme-challenge/ {
        allow all;
    }

    location /ws {
        proxy_pass http://127.0.0.1:8765;
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/runtime/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/dynamic-team/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/trading/ {
        proxy_pass http://127.0.0.1:8001;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/news/ {
        proxy_pass http://127.0.0.1:8002;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location /api/ {
        proxy_pass http://127.0.0.1:8000;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300s;
    }

    location / {
        try_files \$uri \$uri/ /index.html;
    }
}
EOF
  fi
}

write_env_file() {
  sudo mkdir -p "$(dirname "${ENV_FILE}")"
  sudo tee "${ENV_FILE}" >/dev/null <<EOF
AGENT_SERVICE_URL=http://127.0.0.1:8000
TRADING_SERVICE_URL=http://127.0.0.1:8001
NEWS_SERVICE_URL=http://127.0.0.1:8002
RUNTIME_SERVICE_URL=http://127.0.0.1:8003

TICKERS=${TICKERS}
FIN_DATA_SOURCE=${FIN_DATA_SOURCE}
FINANCIAL_DATASETS_API_KEY=${FINANCIAL_DATASETS_API_KEY}
FINNHUB_API_KEY=${FINNHUB_API_KEY}
POLYGON_API_KEY=${POLYGON_API_KEY}
OPENAI_API_KEY=${OPENAI_API_KEY}
OPENAI_BASE_URL=${OPENAI_BASE_URL}
DASHSCOPE_API_KEY=${DASHSCOPE_API_KEY}
MODEL_NAME=${MODEL_NAME}
MEMORY_API_KEY=${MEMORY_API_KEY}
SKILL_SANDBOX_MODE=${SKILL_SANDBOX_MODE}
MAX_COMM_CYCLES=${MAX_COMM_CYCLES}
MARGIN_REQUIREMENT=${MARGIN_REQUIREMENT}
EOF
}

usage() {
  cat <<EOF
Usage:
  ./deploy/install-production.sh [options]

Options:
  --non-interactive          Run with defaults / env overrides only
  --app-dir PATH             Application directory
  --service-user USER        systemd service user
  --service-group GROUP      systemd service group
  --domain DOMAIN            Public domain
  --env-file PATH            Environment file path
  --python-bin PATH          Python executable path
  --tickers CSV              Default tickers
  --fin-data-source NAME     finnhub/yfinance/financial_datasets
  --model-name NAME          Default model name
  --max-comm-cycles N        Conference rounds
  --margin-requirement NUM   Margin requirement
  --use-docker-sandbox       Set SKILL_SANDBOX_MODE=docker
  --no-docker-sandbox        Set SKILL_SANDBOX_MODE=none
  --with-tls                 Generate HTTPS nginx config
  --without-tls              Generate HTTP nginx config
  --install-deps             Auto install dependencies
  --skip-install-deps        Skip dependency installation
  --install-systemd          Install systemd units
  --skip-install-systemd     Skip systemd unit installation
  --start-systemd            Enable/start services
  --skip-start-systemd       Do not start services
  --install-nginx            Install nginx config
  --skip-install-nginx       Skip nginx config installation
  --reload-nginx             Run nginx -t and reload
  --skip-reload-nginx        Skip nginx reload
  --ssl-cert-path PATH       TLS certificate path
  --ssl-key-path PATH        TLS key path
  --help                     Show this help
EOF
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --non-interactive) NON_INTERACTIVE=true ;;
    --app-dir) APP_DIR="${2:?missing value}"; shift ;;
    --service-user) SERVICE_USER="${2:?missing value}"; shift ;;
    --service-group) SERVICE_GROUP="${2:?missing value}"; shift ;;
    --domain) DOMAIN="${2:?missing value}"; shift ;;
    --env-file) ENV_FILE="${2:?missing value}"; shift ;;
    --python-bin) PYTHON_BIN="${2:?missing value}"; shift ;;
    --tickers) TICKERS="${2:?missing value}"; shift ;;
    --fin-data-source) FIN_DATA_SOURCE="${2:?missing value}"; shift ;;
    --model-name) MODEL_NAME="${2:?missing value}"; shift ;;
    --max-comm-cycles) MAX_COMM_CYCLES="${2:?missing value}"; shift ;;
    --margin-requirement) MARGIN_REQUIREMENT="${2:?missing value}"; shift ;;
    --use-docker-sandbox) AUTO_USE_DOCKER="Y" ;;
    --no-docker-sandbox) AUTO_USE_DOCKER="N" ;;
    --with-tls) AUTO_USE_TLS="Y" ;;
    --without-tls) AUTO_USE_TLS="N" ;;
    --install-deps) AUTO_INSTALL_DEPS="Y" ;;
    --skip-install-deps) AUTO_INSTALL_DEPS="N" ;;
    --install-systemd) AUTO_INSTALL_SYSTEMD="Y" ;;
    --skip-install-systemd) AUTO_INSTALL_SYSTEMD="N" ;;
    --start-systemd) AUTO_START_SYSTEMD="Y" ;;
    --skip-start-systemd) AUTO_START_SYSTEMD="N" ;;
    --install-nginx) AUTO_INSTALL_NGINX="Y" ;;
    --skip-install-nginx) AUTO_INSTALL_NGINX="N" ;;
    --reload-nginx) AUTO_RELOAD_NGINX="Y" ;;
    --skip-reload-nginx) AUTO_RELOAD_NGINX="N" ;;
    --ssl-cert-path) SSL_CERT_PATH="${2:?missing value}"; shift ;;
    --ssl-key-path) SSL_KEY_PATH="${2:?missing value}"; shift ;;
    --help) usage; exit 0 ;;
    *) fail "Unknown option: $1" ;;
  esac
  shift
done

main() {
  echo -e "${CYAN}BigTime production deployment wizard${NC}"
  echo ""
  echo -e "${YELLOW}Note:${NC} This script is meant for deploying the current project starting from a blank machine."
  echo -e "${YELLOW}Default recommendation:${NC} split services + systemd + nginx + static frontend."
  echo ""

  if confirm "Try to auto-install base dependencies (python3/nginx/node, etc.)?" "Y" "${AUTO_INSTALL_DEPS}"; then
    PKG_MANAGER="$(detect_pkg_manager)"
    install_packages "${PKG_MANAGER}"
  fi

  echo -e "${CYAN}Basic configuration${NC}"
  APP_DIR="${APP_DIR:-$(ask_required 'Application directory (repository root; an absolute path is recommended)' "${REPO_ROOT}")}"
  [[ -d "${APP_DIR}" ]] || fail "Application directory does not exist: ${APP_DIR}"

  SERVICE_USER="${SERVICE_USER:-$(ask_required 'systemd service user' "$(id -un)")}"
  id "${SERVICE_USER}" >/dev/null 2>&1 || warn "User ${SERVICE_USER} does not exist yet; double-check the systemd configuration later."

  SERVICE_GROUP="${SERVICE_GROUP:-$(ask_required 'systemd service group' "$(id -gn)")}"

  # Try to detect the public IP as the default domain value
  local detected_ip=""
  if [[ -z "${DOMAIN:-}" ]]; then
    log "Trying to detect the public IP..."
    detected_ip=$(curl -s --connect-timeout 5 https://ifconfig.me || curl -s --connect-timeout 5 https://api.ipify.org || echo "")
    if [[ -n "${detected_ip}" ]]; then
      log "Detected public IP: ${detected_ip}"
    fi
  fi

  DOMAIN="${DOMAIN:-$(ask_required 'Deployment domain (an IP or localhost is fine)' "${detected_ip:-localhost}")}"
  validate_domain_like "${DOMAIN}" || warn "The domain/IP does not look standard; please double-check: ${DOMAIN}"

  ENV_FILE="${ENV_FILE:-$(ask_required 'Environment file path' '/etc/bigtime/bigtime.env')}"
  validate_file_parent_exists_or_rootable "${ENV_FILE}" || warn "The environment file's parent directory does not exist yet; the script will try to create it: $(dirname "${ENV_FILE}")"

  PYTHON_BIN="${PYTHON_BIN:-$(ask 'Python executable path' "${APP_DIR}/.venv/bin/python")}"
  [[ -n "${PYTHON_BIN}" ]] || fail "Python path cannot be empty"

  local SKIP_ENV_CONFIG=false
  if [[ -f "${ENV_FILE}" ]]; then
    echo ""
    if confirm "Environment file ${ENV_FILE} already exists. Skip detailed parameter configuration and keep the existing file?" "Y"; then
      SKIP_ENV_CONFIG=true
    fi
  fi

  if ! ${SKIP_ENV_CONFIG}; then
    echo ""
    echo -e "${CYAN}Runtime parameters${NC}"
    TICKERS="${TICKERS:-$(ask 'Default ticker list (comma-separated)' 'AAPL,MSFT,GOOGL,AMZN,NVDA,META,TSLA,AMD,NFLX,AVGO,PLTR,COIN')}"
    FIN_DATA_SOURCE="${FIN_DATA_SOURCE:-$(ask 'Market data source (finnhub/yfinance/financial_datasets)' 'finnhub')}"
    MODEL_NAME="${MODEL_NAME:-$(ask 'Default model name' 'qwen3-max')}"
    MAX_COMM_CYCLES="${MAX_COMM_CYCLES:-$(ask_required 'Max discussion rounds' '2')}"
    validate_numeric "${MAX_COMM_CYCLES}" || fail "Max discussion rounds must be numeric: ${MAX_COMM_CYCLES}"
    MARGIN_REQUIREMENT="${MARGIN_REQUIREMENT:-$(ask_required 'Margin requirement' '0.5')}"
    validate_numeric "${MARGIN_REQUIREMENT}" || fail "Margin requirement must be numeric: ${MARGIN_REQUIREMENT}"

    echo ""
    echo -e "${CYAN}API key configuration${NC}"
    FINANCIAL_DATASETS_API_KEY="${FINANCIAL_DATASETS_API_KEY:-$(ask 'FINANCIAL_DATASETS_API_KEY (optional)' '')}"
    FINNHUB_API_KEY="${FINNHUB_API_KEY:-$(ask 'FINNHUB_API_KEY (recommended for live mode)' '')}"
    POLYGON_API_KEY="${POLYGON_API_KEY:-$(ask 'POLYGON_API_KEY (optional)' '')}"
    OPENAI_API_KEY="${OPENAI_API_KEY:-$(ask 'OPENAI_API_KEY (optional)' '')}"
    OPENAI_BASE_URL="${OPENAI_BASE_URL:-$(ask 'OPENAI_BASE_URL (optional)' '')}"
    DASHSCOPE_API_KEY="${DASHSCOPE_API_KEY:-$(ask 'DASHSCOPE_API_KEY (optional)' '')}"
    MEMORY_API_KEY="${MEMORY_API_KEY:-$(ask 'MEMORY_API_KEY (optional)' '')}"

    if [[ "${FIN_DATA_SOURCE}" == "finnhub" && -z "${FINNHUB_API_KEY}" ]]; then
      warn "You selected finnhub as the data source, but FINNHUB_API_KEY is empty. This usually fails in live mode."
    fi
    if [[ -z "${OPENAI_API_KEY}" && -z "${DASHSCOPE_API_KEY}" ]]; then
      warn "Both OPENAI_API_KEY and DASHSCOPE_API_KEY are empty; model calls will likely not work."
    fi

    if confirm "Execute skills in a Docker sandbox?" "N" "${AUTO_USE_DOCKER}"; then
      SKILL_SANDBOX_MODE="docker"
    else
      SKILL_SANDBOX_MODE="none"
    fi

    echo ""
    echo -e "${CYAN}Deployment summary${NC}"
    echo "  App directory: ${APP_DIR}"
    echo "  Service user:  ${SERVICE_USER}:${SERVICE_GROUP}"
    echo "  Domain:        ${DOMAIN}"
    echo "  Env file:      ${ENV_FILE}"
    echo "  Python:        ${PYTHON_BIN}"
    echo "  Data source:   ${FIN_DATA_SOURCE:-}"
    echo "  Model:         ${MODEL_NAME:-}"
    echo "  Sandbox mode:  ${SKILL_SANDBOX_MODE:-none}"
    echo ""

    if ! confirm "Confirm the configuration above and continue writing system files?" "Y"; then
      fail "Deployment cancelled by user."
    fi
  else
    echo -e "${GREEN}Using the existing environment file; skipping detailed parameter configuration.${NC}"
  fi

  if [[ ! -x "${PYTHON_BIN}" ]]; then
    warn "${PYTHON_BIN} not found; creating a virtual environment."
    python3 -m venv "${APP_DIR}/.venv"
    "${APP_DIR}/.venv/bin/python" -m pip install --upgrade pip
    PYTHON_BIN="${APP_DIR}/.venv/bin/python"
  fi

  log "Installing backend dependencies"
  "${PYTHON_BIN}" -m pip install -e "${APP_DIR}"

  log "Building the frontend"
  (cd "${APP_DIR}/frontend" && npm install && npm run build)

  if ! ${SKIP_ENV_CONFIG}; then
    log "Writing environment file ${ENV_FILE}"
    write_env_file
  fi

  if confirm "Generate and install systemd units?" "Y" "${AUTO_INSTALL_SYSTEMD}"; then
    render_systemd_unit "Agent Service" "backend.apps.agent_service:app" "8000" "1" "1024M" "/etc/systemd/system/bigtime-agent.service"
    render_systemd_unit "Trading Service" "backend.apps.trading_service:app" "8001" "1" "768M" "/etc/systemd/system/bigtime-trading.service"
    render_systemd_unit "News Service" "backend.apps.news_service:app" "8002" "1" "768M" "/etc/systemd/system/bigtime-news.service"
    render_systemd_unit "Runtime Service" "backend.apps.runtime_service:app" "8003" "1" "1536M" "/etc/systemd/system/bigtime-runtime.service"
    sudo systemctl daemon-reload
    if confirm "Enable and start the bigtime-* services now?" "Y" "${AUTO_START_SYSTEMD}"; then
      sudo systemctl enable --now bigtime-agent.service
      sudo systemctl enable --now bigtime-trading.service
      sudo systemctl enable --now bigtime-news.service
      sudo systemctl enable --now bigtime-runtime.service
    fi
  fi

  if confirm "Generate and install the nginx config?" "Y" "${AUTO_INSTALL_NGINX}"; then
    local use_tls="no"
    if confirm "Use HTTPS/Let's Encrypt certificate paths?" "N" "${AUTO_USE_TLS}"; then
      SSL_CERT_PATH="${SSL_CERT_PATH:-$(ask_required 'Path to the SSL certificate fullchain.pem' "/etc/letsencrypt/live/${DOMAIN}/fullchain.pem")}"
      SSL_KEY_PATH="${SSL_KEY_PATH:-$(ask_required 'Path to the SSL private key privkey.pem' "/etc/letsencrypt/live/${DOMAIN}/privkey.pem")}"

      local ssl_err=0
      [[ -f "${SSL_CERT_PATH}" ]] || { warn "SSL certificate file not found: ${SSL_CERT_PATH}"; ssl_err=1; }
      [[ -f "${SSL_KEY_PATH}" ]] || { warn "SSL private key file not found: ${SSL_KEY_PATH}"; ssl_err=1; }
      [[ -f "/etc/letsencrypt/options-ssl-nginx.conf" ]] || { warn "Missing /etc/letsencrypt/options-ssl-nginx.conf; check the certbot setup"; ssl_err=1; }
      [[ -f "/etc/letsencrypt/ssl-dhparams.pem" ]] || { warn "Missing /etc/letsencrypt/ssl-dhparams.pem; check the certbot setup"; ssl_err=1; }

      if [[ ${ssl_err} -eq 0 ]]; then
        use_tls="yes"
      else
        warn "Key SSL files are missing; falling back to HTTP mode so nginx can pass its config check."
        use_tls="no"
      fi
    else
      SSL_CERT_PATH=""
      SSL_KEY_PATH=""
    fi
    NGINX_TARGET="/etc/nginx/conf.d/bigtime.conf"
    render_nginx_conf "${NGINX_TARGET}" "${use_tls}"
    if confirm "Run nginx -t and apply the config now?" "Y" "${AUTO_RELOAD_NGINX}"; then
      log "Validating the nginx config..."
      if ! sudo nginx -t; then
        fail "nginx config check failed. Adjust based on the errors above; common causes are ports 80/443 already in use or a server_name conflict."
      fi

      if systemctl is-active --quiet nginx; then
        log "nginx is running; reloading..."
        sudo systemctl reload nginx
      else
        log "nginx is not running; trying to start it..."
        sudo systemctl enable --now nginx
      fi

      # Key fix: make sure the nginx user has execute (x) permission on /root paths
      if [[ "${APP_DIR}" == /root/* ]]; then
        log "App is deployed under /root; fixing parent directory access permissions..."
        sudo chmod o+x /root 2>/dev/null || true
        sudo chmod o+x "$(dirname "${APP_DIR}")" 2>/dev/null || true
        sudo chmod -R o+rX "${APP_DIR}"
      fi

      log "nginx config applied."
    fi
  fi

  echo ""
  log "Deployment wizard finished"
  echo "App directory: ${APP_DIR}"
  echo "Env file:      ${ENV_FILE}"
  echo "Python:        ${PYTHON_BIN}"
  echo "Sandbox mode:  ${SKILL_SANDBOX_MODE:-none}"
  echo ""
  echo "Suggested verification:"
  echo "  curl http://127.0.0.1:8003/health"
  echo "  curl http://127.0.0.1:8003/api/runtime/current"
  echo "  sudo systemctl status bigtime-runtime.service"
  echo "  tail -f ${APP_DIR}/runs/<run_id>/logs/gateway.log"
}

main "$@"
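For reference, a non-interactive invocation sketch that exercises the script above; it uses only flags defined in its own `usage()` output, and the domain and paths are placeholder values, not project defaults:

```bash
# Hypothetical end-to-end run on a fresh machine (HTTP-only, Docker sandbox off).
sudo ./deploy/install-production.sh \
  --non-interactive \
  --app-dir /opt/bigtime/app \
  --service-user bigtime \
  --service-group bigtime \
  --domain bigtime.example.com \
  --without-tls \
  --install-deps \
  --install-systemd --start-systemd \
  --install-nginx --reload-nginx
```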
@@ -2,8 +2,9 @@ server {
    listen 80;
    server_name bigtime.cillinn.com;

    root /opt/bigtime/app/frontend/dist;

    location /.well-known/acme-challenge/ {
        root /var/www/bigtime/current;
        allow all;
    }

@@ -16,7 +17,7 @@ server {
    listen 443 ssl http2;
    server_name bigtime.cillinn.com;

    root /var/www/bigtime/current;
    root /opt/bigtime/app/frontend/dist;
    index index.html;

    ssl_certificate /etc/letsencrypt/live/bigtime.cillinn.com/fullchain.pem;
@@ -36,6 +37,56 @@ server {
        proxy_read_timeout 300s;
    }

    location /api/runtime/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/dynamic-team/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/trading/ {
        proxy_pass http://127.0.0.1:8001;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/news/ {
        proxy_pass http://127.0.0.1:8002;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/ {
        proxy_pass http://127.0.0.1:8000;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location / {
        try_files $uri $uri/ /index.html;
    }

@@ -2,13 +2,75 @@ server {
    listen 80;
    server_name bigtime.cillinn.com;

    root /var/www/bigtime/current;
    root /opt/bigtime/app/frontend/dist;
    index index.html;

    location /.well-known/acme-challenge/ {
        allow all;
    }

    location /ws {
        proxy_pass http://127.0.0.1:8765;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/runtime/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/dynamic-team/ {
        proxy_pass http://127.0.0.1:8003;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/trading/ {
        proxy_pass http://127.0.0.1:8001;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/news/ {
        proxy_pass http://127.0.0.1:8002;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location /api/ {
        proxy_pass http://127.0.0.1:8000;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }

    location / {
        try_files $uri $uri/ /index.html;
    }

134
deploy/production-deployment.md
Normal file
@@ -0,0 +1,134 @@
# Production Deployment

This is the recommended production deployment mode for the current repository.

## Recommendation

Use:

- split FastAPI services
- `systemd` as the process supervisor
- `nginx` as TLS terminator and reverse proxy
- static frontend build served by `nginx`
- Docker-based skill sandbox

This matches the current architecture better than a monolithic process and is
lower-risk than introducing Kubernetes at the current stage.

## Why This Mode Fits Best

1. The repository already uses a split-service runtime model.
2. `runtime_service` is the correct control-plane entrypoint for starting and
   stopping Gateway subprocesses.
3. The Gateway is run-scoped and ephemeral, which fits `systemd` + subprocess
   management better than forcing everything into a single service binary.
4. Skill execution has security requirements; Docker sandboxing is the practical
   production default.

## Service Layout

| Component | Bind |
|----------|------|
| `agent_service` | `127.0.0.1:8000` |
| `trading_service` | `127.0.0.1:8001` |
| `news_service` | `127.0.0.1:8002` |
| `runtime_service` | `127.0.0.1:8003` |
| gateway websocket | spawned by `runtime_service` |
| `nginx` | `:80` / `:443` |
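A quick post-start sanity check could look like the sketch below. Note that only `runtime_service`'s `/health` endpoint is shown elsewhere in this repository; assuming the same path on the other three services is a guess, so treat failures there as "endpoint may differ" rather than "service is down":

```bash
# Probe each local service port (assumes a /health route on every service;
# only runtime_service's /health is confirmed elsewhere in this repo).
for port in 8000 8001 8002 8003; do
  curl -fsS "http://127.0.0.1:${port}/health" && echo " <- ${port} ok"
done
```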

## Frontend

Recommended frontend mode:

```bash
cd frontend
npm install
npm run build
```

Then point `nginx` root at:

```text
/opt/bigtime/app/frontend/dist
```

This is preferred over running `backend.apps.frontend_service` in production,
because static serving via `nginx` is simpler and more reliable.

## Environment

Create a shared environment file, for example:

```bash
sudo mkdir -p /etc/bigtime
sudo cp .env /etc/bigtime/bigtime.env
```

Required production settings:

```bash
AGENT_SERVICE_URL=http://127.0.0.1:8000
TRADING_SERVICE_URL=http://127.0.0.1:8001
NEWS_SERVICE_URL=http://127.0.0.1:8002
RUNTIME_SERVICE_URL=http://127.0.0.1:8003

SKILL_SANDBOX_MODE=docker
SKILL_SANDBOX_MEMORY_LIMIT=512m
SKILL_SANDBOX_CPU_LIMIT=1.0
SKILL_SANDBOX_NETWORK=none
SKILL_SANDBOX_TIMEOUT=60
```

Also supply the required market/model API keys in the same environment file or
through your secret-management system.

## Data Persistence

Persist these paths on durable storage:

- `runs/`
- `logs/` if you keep service logs on disk
- optional `.env`-backed secrets should not live inside the repo working tree

The key runtime source of truth (inspected in the sketch below) is:

- `runs/<run_id>/state/runtime_state.json`
- `runs/<run_id>/state/server_state.json`
- `runs/<run_id>/logs/gateway.log`
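For example, a minimal sketch for eyeballing the newest run from a shell. It assumes `jq` is installed and that the `runs/` tree follows the layout above; the JSON keys are repo-specific, so this only pretty-prints:

```bash
# Pretty-print the newest run's state and tail its gateway log.
latest="$(ls -1dt runs/*/ | head -n 1)"
jq . "${latest}state/runtime_state.json"
tail -n 50 "${latest}logs/gateway.log"
```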

## nginx Pattern

Recommended routing:

- `/` -> static frontend
- `/api/runtime/*` -> `127.0.0.1:8003`
- `/api/dynamic-team/*` -> `127.0.0.1:8003`
- `/api/trading/*` -> `127.0.0.1:8001`
- `/api/news/*` -> `127.0.0.1:8002`
- `/api/*` -> `127.0.0.1:8000`
- `/ws` -> gateway websocket

The checked-in nginx config should be treated as a starting point, not a full
multi-service production config.

## Operational Notes

- Use `workers=1` for `runtime_service` unless you deliberately redesign the
  runtime manager around multi-process coordination.
- Keep the other API services stateless and scale them separately if needed.
- Monitor:
  - `runtime_service`
  - run-scoped `gateway.log`
  - Docker daemon health
- Rotate logs outside the app, e.g. with journald or logrotate (see the sketch
  after this list).
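As one hedged option for the last point, a logrotate drop-in could look like the following; the path glob, retention, and `copytruncate` choice are illustrative assumptions, not checked-in defaults:

```bash
# Illustrative logrotate drop-in for run-scoped logs (glob and retention are
# assumptions; adjust to where your runs/ tree actually lives).
sudo tee /etc/logrotate.d/bigtime >/dev/null <<'EOF'
/opt/bigtime/app/runs/*/logs/*.log {
    daily
    rotate 14
    compress
    missingok
    notifempty
    copytruncate
}
EOF
```

`copytruncate` is used here because the gateway writes to a plain file and is not expected to reopen its log on rotation.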

## Best Next Step

Deploy with:

- `systemd` units from [deploy/systemd](./systemd)
- `nginx` in front
- one VM first

Only move to containers/orchestration after the runtime/gateway operational
behavior is stable in that simpler topology.
47
deploy/systemd/README.md
Normal file
@@ -0,0 +1,47 @@
# systemd Units

This directory contains recommended `systemd` unit templates for the current
split-service production topology.

## Recommended Topology

- `agent_service` on `127.0.0.1:8000`
- `trading_service` on `127.0.0.1:8001`
- `news_service` on `127.0.0.1:8002`
- `runtime_service` on `127.0.0.1:8003`
- `nginx` serves `frontend/dist` and proxies `/api/*` + `/ws`
- `runtime_service` spawns the run-scoped Gateway subprocess on demand
- skill execution runs with `SKILL_SANDBOX_MODE=docker`

## Install

Adjust these placeholders before installing:

- `/opt/bigtime/app` -> repository root on the server
- `/opt/bigtime/app/.venv/bin/python` -> Python interpreter
- `/etc/bigtime/bigtime.env` -> shared environment file
- `bigtime` -> service user

Then copy units:

```bash
sudo mkdir -p /etc/bigtime
sudo cp .env /etc/bigtime/bigtime.env

sudo cp deploy/systemd/bigtime-*.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now bigtime-agent.service
sudo systemctl enable --now bigtime-trading.service
sudo systemctl enable --now bigtime-news.service
sudo systemctl enable --now bigtime-runtime.service
```
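After enabling, a quick status and log check with standard systemd tooling (nothing repo-specific):

```bash
# Verify all four units came up, then follow the runtime service's journal.
systemctl --no-pager status 'bigtime-*.service'
journalctl -u bigtime-runtime.service -f
```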

## Frontend

Recommended production frontend mode:

- build with `cd frontend && npm install && npm run build`
- let `nginx` serve `frontend/dist` directly

The repository also contains `backend.apps.frontend_service`, but for
production the lower-complexity path is static hosting via `nginx`.
25
deploy/systemd/bigtime-agent.service
Normal file
@@ -0,0 +1,25 @@
[Unit]
Description=BigTime Agent Service
After=network.target

[Service]
Type=simple
User=bigtime
Group=bigtime
WorkingDirectory=/opt/bigtime/app
EnvironmentFile=/etc/bigtime/bigtime.env
ExecStart=/opt/bigtime/app/.venv/bin/python -m uvicorn backend.apps.agent_service:app --host 127.0.0.1 --port 8000 --workers 1 --log-level warning --no-access-log
Restart=always
RestartSec=3
TimeoutStopSec=20
KillMode=mixed
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=full
ProtectHome=true
LimitNOFILE=65535
TasksMax=4096
MemoryMax=1024M

[Install]
WantedBy=multi-user.target
25
deploy/systemd/bigtime-news.service
Normal file
@@ -0,0 +1,25 @@
[Unit]
Description=BigTime News Service
After=network.target

[Service]
Type=simple
User=bigtime
Group=bigtime
WorkingDirectory=/opt/bigtime/app
EnvironmentFile=/etc/bigtime/bigtime.env
ExecStart=/opt/bigtime/app/.venv/bin/python -m uvicorn backend.apps.news_service:app --host 127.0.0.1 --port 8002 --workers 1 --log-level warning --no-access-log
Restart=always
RestartSec=3
TimeoutStopSec=20
KillMode=mixed
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=full
ProtectHome=true
LimitNOFILE=65535
TasksMax=4096
MemoryMax=768M

[Install]
WantedBy=multi-user.target
25
deploy/systemd/bigtime-runtime.service
Normal file
@@ -0,0 +1,25 @@
[Unit]
Description=BigTime Runtime Service
After=network.target

[Service]
Type=simple
User=bigtime
Group=bigtime
WorkingDirectory=/opt/bigtime/app
EnvironmentFile=/etc/bigtime/bigtime.env
ExecStart=/opt/bigtime/app/.venv/bin/python -m uvicorn backend.apps.runtime_service:app --host 127.0.0.1 --port 8003 --workers 1 --log-level warning --no-access-log
Restart=always
RestartSec=3
TimeoutStopSec=30
KillMode=mixed
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=full
ProtectHome=true
LimitNOFILE=65535
TasksMax=4096
MemoryMax=1536M

[Install]
WantedBy=multi-user.target
25
deploy/systemd/bigtime-trading.service
Normal file
@@ -0,0 +1,25 @@
[Unit]
Description=BigTime Trading Service
After=network.target

[Service]
Type=simple
User=bigtime
Group=bigtime
WorkingDirectory=/opt/bigtime/app
EnvironmentFile=/etc/bigtime/bigtime.env
ExecStart=/opt/bigtime/app/.venv/bin/python -m uvicorn backend.apps.trading_service:app --host 127.0.0.1 --port 8001 --workers 1 --log-level warning --no-access-log
Restart=always
RestartSec=3
TimeoutStopSec=20
KillMode=mixed
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=full
ProtectHome=true
LimitNOFILE=65535
TasksMax=4096
MemoryMax=768M

[Install]
WantedBy=multi-user.target
@@ -1,22 +0,0 @@
[Unit]
Description=大时代 Production Service
After=network.target
# COMPATIBILITY_SURFACE: stable
# OWNER: ops-team
# SEE: docs/legacy-inventory.md#gateway-first-production-example
#
# This systemd unit runs the gateway-first production topology.
# It executes scripts/run_prod.sh which launches backend.main as the
# primary gateway/runtime process. For split-service deployment topology,
# see docs/current-architecture.md and deploy/README.md

[Service]
Type=simple
WorkingDirectory=/root/code/evotraders
ExecStart=/root/code/evotraders/scripts/run_prod.sh
Restart=always
RestartSec=5
Environment=PYTHONUNBUFFERED=1

[Install]
WantedBy=multi-user.target
47
deploy/uninstall-production.sh
Normal file
@@ -0,0 +1,47 @@
#!/usr/bin/env bash
set -euo pipefail

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

log() {
  echo -e "${GREEN}[bigtime]${NC} $*"
}

warn() {
  echo -e "${YELLOW}[bigtime]${NC} $*"
}

SYSTEMD_UNITS=(
  bigtime-agent.service
  bigtime-trading.service
  bigtime-news.service
  bigtime-runtime.service
)

NGINX_CONF="/etc/nginx/conf.d/bigtime.conf"
ENV_FILE="/etc/bigtime/bigtime.env"

for unit in "${SYSTEMD_UNITS[@]}"; do
  if systemctl list-unit-files "${unit}" >/dev/null 2>&1; then
    warn "Stopping ${unit}"
    sudo systemctl disable --now "${unit}" || true
    sudo rm -f "/etc/systemd/system/${unit}"
  fi
done

sudo systemctl daemon-reload || true

if [[ -f "${NGINX_CONF}" ]]; then
  warn "Removing nginx config ${NGINX_CONF}"
  sudo rm -f "${NGINX_CONF}"
  sudo nginx -t && sudo systemctl reload nginx || true
fi

if [[ -f "${ENV_FILE}" ]]; then
  warn "Keeping env file ${ENV_FILE}"
  warn "Delete it manually if you want a full cleanup."
fi

log "BigTime production service uninstall finished."
@@ -1,239 +0,0 @@
|
||||
# 关键代码修复方案
|
||||
|
||||
## 1. EvoAgent 长期记忆支持 ✅
|
||||
|
||||
**状态**: EvoAgent 已支持 `long_term_memory` 参数,但需要移除 Legacy 回退逻辑
|
||||
|
||||
**需要修改的文件**:
|
||||
- `backend/main.py` 第 158-176 行 - 移除记忆启用时的 Legacy 回退
|
||||
- `backend/core/pipeline.py` - 同样更新
|
||||
- `backend/core/pipeline_runner.py` - 同样更新
|
||||
|
||||
**修复代码** (main.py):
|
||||
```python
|
||||
def _create_analyst_agent(...):
|
||||
# ... 工具包创建代码 ...
|
||||
|
||||
use_evo_agent = analyst_type in _resolve_evo_agent_ids()
|
||||
|
||||
if use_evo_agent:
|
||||
workspace_dir = skills_manager.get_agent_asset_dir(config_name, analyst_type)
|
||||
agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
|
||||
agent = EvoAgent(
|
||||
agent_id=analyst_type,
|
||||
config_name=config_name,
|
||||
workspace_dir=workspace_dir,
|
||||
model=model,
|
||||
formatter=formatter,
|
||||
skills_manager=skills_manager,
|
||||
prompt_files=agent_config.prompt_files,
|
||||
long_term_memory=long_term_memory, # 已支持
|
||||
long_term_memory_mode="static_control",
|
||||
)
|
||||
agent.toolkit = toolkit
|
||||
setattr(agent, "workspace_id", config_name)
|
||||
return agent
|
||||
|
||||
# Legacy fallback (deprecated)
|
||||
return AnalystAgent(...)
|
||||
```
|
||||
|
||||
## 2. Workspace ID 语义清理
|
||||
|
||||
**问题**: `workspace_id` 同时用于 design-time 和 runtime 两个不同概念
|
||||
|
||||
**修复方案**:
|
||||
|
||||
```python
|
||||
# backend/api/workspaces.py
|
||||
# 明确区分两种资源
|
||||
|
||||
# Design-time workspaces (CRUD)
|
||||
@router.get("/design-workspaces/{workspace_id}/...")
|
||||
async def get_design_workspace(workspace_id: str): ...
|
||||
|
||||
# Runtime runs (只读)
|
||||
@router.get("/runs/{run_id}/agents/{agent_id}/...")
|
||||
async def get_runtime_agent(run_id: str, agent_id: str): ...
|
||||
```
|
||||
|
||||
## 3. ToolGuard 与 Gateway 审批同步 ✅ 已完成
|
||||
|
||||
**状态**: 审批同步已完善,添加了批量审批支持
|
||||
|
||||
**API 端点**:
|
||||
- `POST /api/guard/check` - 检查工具调用是否需要审批
|
||||
- `POST /api/guard/approve` - 批准单个工具调用
|
||||
- `POST /api/guard/approve/batch` - ✅ 批量批准多个工具调用(新增)
|
||||
- `POST /api/guard/deny` - 拒绝工具调用
|
||||
- `GET /api/guard/pending` - 获取待审批列表
|
||||
|
||||
**批量审批示例**:
|
||||
```python
|
||||
# 批量批准
|
||||
await approve_tool_calls(
|
||||
BatchApprovalRequest(
|
||||
approval_ids=["approval_001", "approval_002", "approval_003"],
|
||||
one_time=True,
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
**超时处理**: 默认 300 秒超时,可在 `ToolGuardMixin._init_tool_guard()` 中配置
|
||||
|
||||
## 4. Smoke Test 依赖修复
|
||||
|
||||
**需要的依赖**:
|
||||
```bash
|
||||
pip install pandas numpy matplotlib seaborn
|
||||
pip install finnhub-python yfinance
|
||||
pip install loguru rich
|
||||
pip install websockets
|
||||
pip install httpx requests
|
||||
pip install PyYAML
|
||||
pip install pandas-market-calendars exchange-calendars
|
||||
```
|
||||
|
||||
## 5. 统一 Agent 工厂 ✅ 已完成
|
||||
|
||||
**文件** `backend/agents/unified_factory.py`:
|
||||
|
||||
统一工厂已创建,支持:
|
||||
- 所有 6 种 Agent 角色的创建
|
||||
- 自动 EvoAgent vs Legacy Agent 选择
|
||||
- Workspace 驱动配置
|
||||
- 长期记忆支持
|
||||
|
||||
```python
|
||||
from backend.agents.unified_factory import UnifiedAgentFactory, get_agent_factory
|
||||
|
||||
# 使用示例
|
||||
factory = UnifiedAgentFactory(
|
||||
config_name="smoke_fullstack",
|
||||
skills_manager=skills_manager,
|
||||
)
|
||||
|
||||
# 创建分析师
|
||||
analyst = factory.create_analyst(
|
||||
analyst_type="fundamentals_analyst",
|
||||
model=model,
|
||||
formatter=formatter,
|
||||
long_term_memory=memory,
|
||||
)
|
||||
```
|
||||
|
||||
## 6. EvoAgent Enabled by Default

**Change** `backend/config/constants.py`:

```python
# All roles use EvoAgent by default
DEFAULT_EVO_AGENT_ROLES = {
    "fundamentals_analyst",
    "technical_analyst",
    "sentiment_analyst",
    "valuation_analyst",
    "risk_manager",
    "portfolio_manager",
}

# EVO_AGENT_IDS is now used to narrow the EvoAgent selection:
# if set, only the listed roles are enabled;
# if unset, all roles are enabled.
```

**Change** `backend/main.py`:
```python
def _resolve_evo_agent_ids() -> set[str]:
    """Return agent ids selected to use EvoAgent.

    By default, all supported roles use EvoAgent.
    EVO_AGENT_IDS can be used to limit to specific roles.
    """
    from backend.config.constants import DEFAULT_EVO_AGENT_ROLES

    raw = os.getenv("EVO_AGENT_IDS", "")
    if raw.strip():
        # Filter to only valid roles
        requested = {x.strip() for x in raw.split(",") if x.strip()}
        return requested & DEFAULT_EVO_AGENT_ROLES

    # Default: all roles use EvoAgent
    return DEFAULT_EVO_AGENT_ROLES
```

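A pytest-style sketch of the resolver's two branches; the test names are hypothetical, but the expected behavior follows directly from the code above:

```python
import backend.main as main_mod


def test_evo_agent_ids_filters_to_valid_roles(monkeypatch):
    # Unknown names are dropped by the intersection with DEFAULT_EVO_AGENT_ROLES.
    monkeypatch.setenv("EVO_AGENT_IDS", "fundamentals_analyst, not_a_role")
    assert main_mod._resolve_evo_agent_ids() == {"fundamentals_analyst"}


def test_default_enables_all_roles(monkeypatch):
    monkeypatch.delenv("EVO_AGENT_IDS", raising=False)
    assert len(main_mod._resolve_evo_agent_ids()) == 6
```
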
## 7. Legacy Code Cleanup

**Files that can be deleted**:
- `backend/agents/compat.py` ✅ deleted
- `frontend/src/hooks/useWebsocketSessionSync.js` ✅ deleted

**Files marked deprecated** ✅ done (see the sketch below):
- `backend/agents/analyst.py` - DeprecationWarning added
- `backend/agents/risk_manager.py` - DeprecationWarning added
- `backend/agents/portfolio_manager.py` - DeprecationWarning added

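A sketch of the module-level deprecation marker used in those files; the exact message text in the repo may differ:

```python
import warnings

warnings.warn(
    "backend.agents.analyst is deprecated; create agents through "
    "backend.agents.unified_factory instead",
    DeprecationWarning,
    stacklevel=2,
)
```
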
## 8. Test Fixes

**Update** `backend/tests/test_evo_agent_selection.py`:

Removed these tests ✅ done:
- `test_main_create_analyst_agent_falls_back_to_legacy_when_memory_enabled`
- `test_main_create_risk_manager_falls_back_to_legacy_when_memory_enabled`
- `test_main_create_portfolio_manager_falls_back_to_legacy_when_memory_enabled`

Added new tests ✅ done:
- `test_evo_agent_supports_long_term_memory`
- `test_all_roles_use_evo_agent_by_default`

New integration test file ✅ done:
- `backend/tests/test_evo_agent_integration.py` - 13 integration tests covering Factory, ToolGuard, and Workspace integration

## 9. Quick Fix Checklist

Run the following to apply the key fixes:

```bash
# 1. Fix EvoAgent memory support (edit main.py, pipeline.py, pipeline_runner.py)
# Remove the legacy fallback triggered by the long_term_memory check

# 2. Fix default EvoAgent enablement
sed -i 's/def _resolve_evo_agent_ids():/def _resolve_evo_agent_ids() -> set[str]:/' backend/main.py

# 3. Make sure all tests pass
pytest backend/tests/test_evo_agent_selection.py -v

# 4. Run the smoke test
python3 scripts/smoke_evo_runtime.py --test-all-roles
```

## 10. Implementation Progress

### ✅ Completed

| Task | Status | Files |
|------|--------|-------|
| EvoAgent long-term memory support | ✅ Done | `evo_agent.py`, `main.py` |
| EvoAgent enabled by default for all roles | ✅ Done | `main.py`, `pipeline.py` |
| Unified agent factory | ✅ Done | `unified_factory.py` |
| ToolGuard/Gateway approval sync | ✅ Done | `tool_guard.py`, `guard.py` |
| ToolGuard batch approval | ✅ Done | `guard.py` |
| Legacy agents marked deprecated | ✅ Done | `analyst.py`, `risk_manager.py`, `portfolio_manager.py` |
| Integration tests | ✅ Done | `test_evo_agent_integration.py` |
| Type annotations | ✅ Done | `unified_factory.py` |
| Team infrastructure | ✅ Done | `messenger.py`, `task_delegator.py` |
| Sandboxed skill execution | ✅ Done | `sandboxed_executor.py` |

### 🚧 Remaining

| Priority | Task | Notes |
|----------|------|-------|
| P0 | Smoke test dependency fixes | Requires installing pandas, finnhub, pandas-market-calendars, etc. |
| P1 | Workspace ID semantics cleanup | ✅ `run_id` added; `workspace_id` kept for backward compatibility |
| P2 | Documentation polish | ✅ Done |

*Last updated: 2026-04-02*

---

*Document generated: 2026-04-01*
@@ -1,249 +0,0 @@

# 大时代 Project Optimization and Feature Completion Plan

## Current Status Assessment

### Completed Work
1. ✅ EvoAgent core implementation (`backend/agents/base/evo_agent.py`)
2. ✅ ToolGuardMixin tool guard (`backend/agents/base/tool_guard.py`)
3. ✅ Hooks system (`backend/agents/base/hooks.py`)
4. ✅ Smoke test script (`scripts/smoke_evo_runtime.py`)
5. ✅ Selective EvoAgent tests (`backend/tests/test_evo_agent_selection.py`)
6. ✅ Deleted the `backend/agents/compat.py` compatibility layer
7. ✅ Deleted the old `useWebsocketSessionSync.js` hook

### Outstanding Issues

#### 🔴 P0: Blocking the Full EvoAgent Rollout

| # | Issue | Location | Impact | Fix |
|---|-------|----------|--------|-----|
| P0-1 | EvoAgent lacks long-term memory support | `evo_agent.py:165-166` | Falls back to a legacy agent when memory is enabled | Integrate the ReMe memory system |
| P0-2 | Inconsistent runtime analyst creation paths in the pipeline | `pipeline.py` | Runtime dynamic creation may skip the EvoAgent path | Unify the `_create_runtime_analyst` logic |
| P0-3 | Confused workspace loading paths | `workspace.py`, `workspace_manager.py` | `workspace_id` vs `run_id` semantics are mixed | Clearly separate design-time and runtime paths |
| P0-4 | Smoke test failure triage | `scripts/smoke_evo_runtime.py` | Cannot verify that EvoAgent starts correctly | Fix the tests and make them pass |

#### 🟡 P1: Feature Completion

| # | Issue | Location | Impact | Fix |
|---|-------|----------|--------|-----|
| P1-1 | Team infrastructure unfinished | `evo_agent.py:41-48` | Inter-agent messaging and task delegation unavailable | Finish messenger and task_delegator |
| P1-2 | ToolGuard integration with the Gateway approval flow | `tool_guard.py`, `api/guard.py` | Approval state sync can drift | Unify approval storage and event notification |
| P1-3 | Sandboxed skill execution | `tools/sandboxed_executor.py` | Production needs Docker isolation | Finish the sandboxed executor |
| P1-4 | Error handling and retry mechanisms | multiple | Some errors are not handled correctly | Add unified error handling |

#### 🟢 P2: Code Quality and Maintainability

| # | Issue | Location | Impact | Fix |
|---|-------|----------|--------|-----|
| P2-1 | Duplicated agent creation logic | `main.py`, `pipeline.py`, `pipeline_runner.py` | Hard to maintain, easy to miss cases | Extract a unified agent factory |
| P2-2 | Incomplete type annotations | multiple | Weak IDE assistance | Complete the annotations |
| P2-3 | No EvoAgent integration tests | `backend/tests/` | Cannot guarantee feature completeness | Add integration tests |
| P2-4 | Documentation and comments | multiple | Hard for new contributors to understand | Improve the docs |
---

## Detailed Implementation Plan

### Phase 1: P0 Blocker Fixes

#### P0-1: EvoAgent Long-Term Memory Support

**Problem**:
```python
# Current logic in main.py
if long_term_memory and agent_id not in EVO_AGENT_IDS:
    ...  # use a legacy agent
else:
    ...  # use EvoAgent
```

**Goal**: EvoAgent supports the ReMe long-term memory system.

**Implementation steps** (a constructor sketch follows the file list below):
1. Accept the `long_term_memory` parameter properly in `EvoAgent.__init__`
2. Integrate reads and writes against the ReMe memory system
3. Add memory-related lifecycle management in the Hooks
4. Remove the EvoAgent memory fallback logic from `main.py` and `pipeline.py`

**Files changed**:
- `backend/agents/base/evo_agent.py`
- `backend/main.py`
- `backend/core/pipeline.py`

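A minimal sketch of step 1, assuming keyword-only parameters in the existing constructor shape; the surrounding arguments are elided and the parameter names come from the creation snippet earlier in this document:

```python
class EvoAgent:
    def __init__(
        self,
        *,
        agent_id: str,
        long_term_memory=None,  # ReMe-backed store; None disables the feature
        long_term_memory_mode: str = "static_control",
        **kwargs,
    ) -> None:
        self.agent_id = agent_id
        self.long_term_memory = long_term_memory
        self.long_term_memory_mode = long_term_memory_mode
```
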
#### P0-2: Unify Runtime Analyst Creation in the Pipeline

**Problem**:
The `TradingPipeline._create_runtime_analyst` method must ensure that it:
1. checks the `EVO_AGENT_IDS` environment variable
2. passes all required parameters to EvoAgent correctly
3. handles workspace asset preparation

**Implementation steps**:
1. Unify the agent creation logic in `pipeline.py` and `main.py`
2. Keep the EvoAgent and legacy path parameters consistent
3. Add tests for runtime dynamic agent creation

**Files changed**:
- `backend/core/pipeline.py`
- `backend/main.py`

#### P0-3: Workspace Path Cleanup

**Problem**:
- `workspace_id` sometimes refers to a design-time workspace under `workspaces/`
- and sometimes to a runtime workspace under `runs/<run_id>/`

**Solution**:
1. Name things explicitly: `design_workspace_id` vs `run_id`
2. Separate the two kinds of resources in the API routes
3. Use `run_id` consistently as the internal runtime identifier

**Files changed**:
- `backend/api/workspaces.py`
- `backend/api/agents.py`
- `backend/agents/workspace_manager.py`

#### P0-4: Smoke Test Fixes

**Current test**:
```bash
python3 scripts/smoke_evo_runtime.py --agent-id fundamentals_analyst
```

**Verification points** (a helper for point 3 is sketched below):
1. Gateway starts normally
2. EvoAgent log lines appear
3. `runtime_state.json` is written correctly
4. the approval flow works

**Implementation steps**:
1. Run the test and identify the failure points
2. Fix EvoAgent initialization issues
3. Make sure all 6 roles pass the test

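A small helper for verification point 3; the assumption that `runtime_state.json` sits directly under `runs/<run_id>/` is illustrative:

```python
import json
from pathlib import Path


def runtime_state_written(run_id: str) -> bool:
    """Return True when the run's runtime_state.json exists and holds data."""
    state_file = Path("runs") / run_id / "runtime_state.json"
    if not state_file.is_file():
        return False
    try:
        return bool(json.loads(state_file.read_text()))
    except json.JSONDecodeError:
        return False
```
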
---

### Phase 2: P1 Feature Completion

#### P1-1: Team Infrastructure

**Current state**:
```python
try:
    from backend.agents.team.messenger import AgentMessenger
    from backend.agents.team.task_delegator import TaskDelegator
    TEAM_INFRA_AVAILABLE = True
except ImportError:
    TEAM_INFRA_AVAILABLE = False
```

**Goal**: finish inter-agent messaging and task delegation.

**Implementation steps** (a messenger sketch follows this list):
1. Complete the `AgentMessenger` implementation
2. Complete the `TaskDelegator` implementation
3. Add tests for agent team coordination

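A hedged sketch of the messaging surface implied above; only the class name comes from the plan, the methods are assumptions:

```python
import asyncio
from collections import defaultdict


class AgentMessenger:
    """Minimal in-process pub/sub between agents."""

    def __init__(self) -> None:
        self._queues: dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)

    async def send(self, to_agent: str, message: dict) -> None:
        await self._queues[to_agent].put(message)

    async def receive(self, agent_id: str) -> dict:
        return await self._queues[agent_id].get()
```
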
#### P1-2: ToolGuard and Gateway Integration

**Current state**:
- `ToolGuardStore` is an in-memory store
- the Gateway reaches it through `get_global_runtime_manager()`

**Improvements**:
1. Keep approval state in sync between the Gateway and agents
2. Add approval timeout handling
3. Support batch approval

#### P1-3: Sandboxed Skill Execution

**Current state**:
```bash
SKILL_SANDBOX_MODE=none  # development mode, executes directly
```

**Goal**: use Docker isolation in production.

**Implementation steps** (an invocation sketch follows this list):
1. Finish `DockerSandboxBackend`
2. Add resource limits (CPU, memory, network)
3. Add execution timeout control

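A hedged sketch of what a `DockerSandboxBackend` invocation could look like; the docker flags are standard CLI options, while the image name and timeout default are assumptions:

```python
import subprocess


def run_skill_sandboxed(script_path: str, timeout_s: int = 60) -> str:
    """Execute a skill script in an isolated, resource-limited container."""
    cmd = [
        "docker", "run", "--rm",
        "--network", "none",                 # no network access
        "--cpus", "1", "--memory", "512m",   # CPU and memory limits
        "-v", f"{script_path}:/skill.py:ro",
        "python:3.11-slim", "python", "/skill.py",
    ]
    result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout_s)
    result.check_returncode()
    return result.stdout
```
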
---

### Phase 3: P2 Code Quality

#### P2-1: Unified Agent Factory

**Goal**: extract an `AgentFactory` that handles all agent creation.

**Design**:
```python
class AgentFactory:
    def create_analyst(self, analyst_type: str, **kwargs) -> BaseAgent: ...
    def create_risk_manager(self, **kwargs) -> BaseAgent: ...
    def create_portfolio_manager(self, **kwargs) -> BaseAgent: ...
```

#### P2-2: Type Annotations

**Goal**: complete type annotations on all public APIs.

#### P2-3: Integration Tests

**Goal**: full end-to-end tests for EvoAgent.

---

## Implementation Order

### Week 1: P0 Blockers
1. [ ] P0-4: run the smoke test and identify failure points
2. [ ] P0-1: EvoAgent long-term memory support
3. [ ] P0-2: unify the runtime pipeline paths
4. [ ] P0-3: workspace path cleanup
5. [ ] verify that all smoke tests pass

### Week 2: P1 Feature Completion
1. [ ] P1-1: team infrastructure
2. [ ] P1-2: ToolGuard integration improvements
3. [ ] P1-3: sandboxed skill execution

### Week 3: P2 Code Quality
1. [ ] P2-1: unified agent factory
2. [ ] P2-2: type annotations
3. [ ] P2-3: integration tests
4. [ ] P2-4: documentation polish

---

## Success Criteria

### EvoAgent Full Rollout Criteria
1. ✅ all 6 roles pass the smoke test
2. ✅ long-term memory works correctly
3. ✅ EvoAgent works without setting the `EVO_AGENT_IDS` environment variable
4. ✅ legacy agent code is marked deprecated
5. ✅ integration tests cover the main usage scenarios

### Architecture Cleanup Criteria
1. ✅ `runs/<run_id>/` is the only runtime data source
2. ✅ `workspaces/` is used only as the design-time registry
3. ✅ all service boundaries are clear, with no circular dependencies
4. ✅ documentation and code agree

---

## Risks and Mitigations

| Risk | Likelihood | Impact | Mitigation |
|------|------------|--------|------------|
| EvoAgent and legacy behavior diverge | Medium | High | Run both in parallel and compare |
| Long-term memory integration is complex | Medium | Medium | Implement in stages, basics first |
| Performance regression | Low | High | Benchmarks and profiling |
| Instability during migration | Medium | High | Keep legacy as a fallback |

---

*Plan created: 2026-04-01*
*Owner: Claude Code*
29
docs/README.md
Normal file
@@ -0,0 +1,29 @@
# Docs Index

This directory keeps only the documents that are still worth maintaining.
Suggested reading order: start with the authoritative facts, then the compatibility cleanup and the planning notes.

## Authoritative Facts

- [`current-architecture.md`](./current-architecture.md) — current project architecture, runtime paths, and source-of-truth definitions
- [`project-layout.md`](./project-layout.md) — responsibilities of the current repository directories and how to read them
- [`terminology.md`](./terminology.md) — core terms such as runtime, run, and workspace
- [`runtime-api-changes.md`](./runtime-api-changes.md) — current `runtime_service` API conventions and behavioral notes
- [`current-architecture.excalidraw`](./current-architecture.excalidraw) — architecture diagram source file

## Compatibility and Legacy Cleanup

- [`legacy-inventory.md`](./legacy-inventory.md) — inventory of compatibility layers, legacy code, and historical boundaries
- [`compat-removal-plan.md`](./compat-removal-plan.md) — compatibility removal plan and convergence direction

## Planning Notes

- [`development-roadmap.md`](./development-roadmap.md) — future evolution directions and staged roadmap

## Usage Principles

- For "how the system works now", read `current-architecture.md` first
- For "how to understand the top-level directories", read `project-layout.md` first
- For "why a legacy entry point still exists", read `legacy-inventory.md` first
- For "which compatibility paths are about to be removed", read `compat-removal-plan.md` first
- Do not treat historical plans or stage-specific fix records as the current source of truth
@@ -67,11 +67,11 @@ backward-compatible behavior while migration settles.

## Remaining Migration Risks

### Split service deployment is not yet the checked-in production default
### Checked-in deployment artifacts still lag the development topology

- The repo documents split-service local development clearly.
- The checked-in production example still centers on `backend.main` and nginx
  WebSocket proxying.
- The checked-in deployment docs still center on historical nginx
  WebSocket proxying rather than the active dev topology.
- This is a topology mismatch to keep in mind when changing deploy docs or prod
  automation.

@@ -93,13 +93,13 @@ backward-compatible behavior while migration settles.
Migration can be considered effectively complete when all of the following are
true:

1. Production deployment docs and scripts explicitly run the same split-service
   topology used in development, or intentionally document a different stable
   production topology.
1. Deployment docs and scripts explicitly run the same split-service
   topology used in development, or are removed from the repo.
2. Critical read paths no longer require ambiguous fallback behavior to local
   module implementations.
3. OpenClaw integration is documented as a stable contract with clear guidance
   on when to use the WebSocket gateway versus the REST surface.
   on when to use the WebSocket gateway versus the small set of CLI-backed
   gateway read helpers.
4. The frontend-service routing model is stable enough that direct-service and
   gateway-mediated paths are deliberate design choices rather than migration
   leftovers.
@@ -137,9 +137,6 @@ Recommended next action:
These still have an operational reason to exist and should be documented rather
than treated as accidental leftovers.

- `backend.main`
  - compatibility gateway/runtime process
  - still relevant for websocket transport and current deploy topology
- `runs/<run_id>/team_dashboard/*.json`
  - export/consumer compatibility layer
  - gateway-mediated websocket/event flow
@@ -147,8 +144,8 @@ than treated as accidental leftovers.

Recommended next action:

- keep these, but document them as intentional compatibility surfaces with
  explicit ownership.
- keep only surfaces with an active operational consumer, and avoid routing new
  development through them.

### 3. Defer Until Topology Decisions Are Final

@@ -157,8 +154,8 @@ churn without simplifying the current runtime.

- `workspaces/` design-time registry versus `runs/<run_id>/` runtime state
- env-dependent service fallback behavior
- checked-in deployment docs centered on `backend.main`
- dual OpenClaw shapes: gateway integration and REST facade
- checked-in deployment docs that have not yet been rewritten around split services
- dual OpenClaw access patterns: gateway integration and CLI-backed read helpers

Recommended next action:

@@ -386,13 +386,13 @@
"updated": 1,
"link": null,
"locked": false,
"text": "Gateway :8765\\nbackend.main\\nWebSocket + runtime orchestration",
"text": "Gateway :8765\\nGateway process\\nWebSocket + runtime orchestration",
"fontSize": 18,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": null,
"originalText": "Gateway :8765\nbackend.main\nWebSocket + runtime orchestration",
"originalText": "Gateway :8765\nGateway process\nWebSocket + runtime orchestration",
"lineHeight": 1.2
},
{
@@ -696,13 +696,13 @@
"updated": 1,
"link": null,
"locked": false,
"text": "OpenClaw WS :18789\\noptional REST :8004",
"text": "OpenClaw WS :18789\\nCLI-backed reads via gateway",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": null,
"originalText": "OpenClaw WS :18789\noptional REST :8004",
"originalText": "OpenClaw WS :18789\nCLI-backed reads via gateway",
"lineHeight": 1.2
},
{

@@ -1,8 +1,8 @@
# Current Architecture

This file describes the current code-supported architecture only. Historical
paths and partial migrations are intentionally excluded unless called out as
legacy compatibility.
paths and partial migrations are intentionally excluded unless brief historical
context is needed to explain the current shape.

Reference material:

@@ -11,40 +11,21 @@ Reference material:
- legacy inventory: [legacy-inventory.md](./legacy-inventory.md)
- terminology guide: [terminology.md](./terminology.md)

## Runtime Modes
## Runtime Mode

The system supports two distinct runtime modes:
The supported runtime model is the split-service development architecture.

### Standalone Mode (Legacy Compatibility)

Direct Gateway startup via `backend.main` as a monolithic entrypoint.

```bash
python -m backend.main --mode live --port 8765
```

**Characteristics:**
- Single process runs Gateway, Pipeline, Market Service, and Scheduler
- No service discovery or process management
- Suitable for single-node deployments and quick testing
- All components share the same memory space

**Use cases:**
- Quick local testing without service orchestration
- Single-node production deployments
- Backward compatibility with legacy startup scripts

### Microservice Mode (Default for Development)

Split-service architecture with dedicated runtime_service managing the Gateway lifecycle.
Split-service architecture with a dedicated runtime API surface and a separate
Gateway process.

```bash
./start-dev.sh # Starts all services including runtime_service and Gateway
```

**Characteristics:**
- `runtime_service` (:8003) acts as Gateway Process Manager
- Gateway runs as a subprocess managed by runtime_service
- `runtime_service` (:8003) provides runtime lifecycle APIs
- the checked-in `start-dev.sh` starts split services and lets `runtime_service` spawn Gateway
- manual split-service flows can also let `runtime_service` spawn Gateway
- Clear separation between Control Plane (runtime_service) and Data Plane (Gateway)
- Service discovery via environment variables
- Independent scaling and deployment of each service
@@ -54,20 +35,7 @@ Split-service architecture with dedicated runtime_service managing the Gateway l
- Multi-node deployments
- Production environments requiring service isolation

## Mode Comparison

| Aspect | Standalone Mode | Microservice Mode |
|--------|-----------------|-------------------|
| **Entry point** | `python -m backend.main` | `./start-dev.sh` or individual services |
| **Process model** | Single monolithic process | Multiple specialized processes |
| **Gateway management** | Self-contained | Managed by runtime_service |
| **Service discovery** | None (in-process) | Environment variable based |
| **Hot reload** | Full restart required | Per-service reload |
| **Scaling** | Vertical only | Horizontal possible |
| **Complexity** | Lower | Higher |
| **Use case** | Testing, simple deployments | Development, production |

## Default Runtime Shape (Microservice Mode)
## Default Runtime Shape

The active runtime path is:

@@ -83,7 +51,6 @@ Current service surfaces:
- read-only explain/news APIs
- `backend.apps.runtime_service` on `:8003`
  - runtime lifecycle and gateway process management
  - optional OpenClaw REST facade
- gateway WebSocket on `:8765`
  - live feed/event transport and pipeline coordination

@@ -163,11 +130,11 @@ use `docker` mode with appropriate resource limits.

### Current State

The system is in a transitional state:
The system is in an active development state:

1. **Microservice infrastructure is operational** - runtime_service can start/stop Gateway as subprocess
2. **Pipeline logic remains in Gateway** - full Pipeline execution still happens within Gateway process
3. **Standalone mode is preserved** - direct `backend.main` startup for compatibility
3. **Direct gateway startup has been removed** - the repository now exposes a single supported startup model

### Future Direction

@@ -184,9 +151,9 @@ Phase 3: Pipeline decomposition (planned)
- Gateway becomes a thin event router
- runtime_service evolves into full orchestrator

Phase 4: Standalone mode deprecation (future)
- Remove direct `backend.main` entry point
- All deployments use microservice mode
Phase 4: Deployment convergence (future)
- Remove or rewrite historical deployment artifacts
- Keep all documented startup paths aligned with `runtime_service`

## Legacy Compatibility

@@ -194,8 +161,8 @@ These items still exist, but they are not the recommended source of truth for
new development:

- root-level runtime data directories such as `live/`, `production/`, `backtest/`
- direct `backend.main` startup as the primary development path
- historical documentation gaps that have not yet been fully rewritten

The current runtime still creates legacy `AnalystAgent` / `RiskAgent` /
`PMAgent` instances directly. EvoAgent remains an in-progress migration target,
not the default execution path.
Legacy fallback agent paths still exist in compatibility-oriented creation
flows, but the default `TradingPipeline` runtime now prefers `EvoAgent` for the
supported roles unless rollout settings explicitly reduce that set.

@@ -63,14 +63,14 @@ Goal: preserve only intentional compatibility layers.

Planned work:

- identify startup scripts and deploy artifacts that still center on
  `backend.main` as a monolithic entrypoint
- identify any remaining deployment docs that still lag the split-service topology
- classify compatibility surfaces into:
  - stable and intentional
  - temporary and shrinking
  - removable once replacements are fully active
- reduce env-dependent fallback ambiguity for read-only service routing where practical
- document the difference between OpenClaw WebSocket integration and the optional REST facade
- document the difference between OpenClaw WebSocket integration and the small
  set of CLI-backed gateway read helpers

Definition of done:

@@ -21,7 +21,7 @@ These are the current defaults to build against:
- `backend.apps.trading_service` on `:8001`
- `backend.apps.news_service` on `:8002`
- gateway process
  - `backend.main`
  - `backend.gateway_server` in the default managed-runtime path
  - `backend.services.gateway` on `:8765`

## Compatibility Surface Classification
@@ -35,7 +35,6 @@ compatibility surfaces with explicit ownership.

| Surface | Location | Owner | Reason |
|---------|----------|-------|--------|
| Gateway-first production | `scripts/run_prod.sh`, `deploy/systemd/`, `deploy/nginx/` | ops-team | Current production example runs gateway directly and proxies `/ws` |
| Dashboard export layer | `runs/<run_id>/team_dashboard/*.json` | frontend-team | Downstream dashboard consumers read these exports |
| Design-time workspace registry | `workspaces/`, `backend.api.workspaces` | control-plane-team | Control-plane editing and registry-style management |
| Gateway WebSocket transport | `backend.services.gateway` on `:8765` | runtime-team | Live event streaming contract for frontend |
@@ -50,8 +49,8 @@ in use.

| Surface | Location | Replacement | ETA |
|---------|----------|-------------|-----|
| Legacy analyst agents | `backend.agents.analyst.*` | `EvoAgent` | After EvoAgent smoke tests pass |
| Mixed workspace_id semantics | `/api/workspaces/{id}/agents/...` | ✅ `/api/runs/{run_id}/agents/...` routes added | Completed |
| Legacy agent fallback paths | compatibility constructors in `backend.core.pipeline_runner` | `EvoAgent` | After fallback-free runtime cutover |
| Mixed workspace_id semantics | removed from runtime agent routes | ✅ `/api/runs/{run_id}/agents/...` is the only runtime agent route | Completed |
| Root-level runtime directories | `live/`, `backtest/`, `production/` | `runs/<run_id>/` | ✅ Removed, backed up to runs/_legacy/ |

**Status**: Do not add new code using these surfaces. Migrate existing usage
@@ -67,43 +66,13 @@ topology and service-routing policy are frozen.
|---------|---------------|-----------------|
| OpenClaw integration | Gateway WebSocket (port 18789) | Stable — REST facade removed |
| Env-dependent service fallbacks | `TRADING_SERVICE_URL`, `NEWS_SERVICE_URL` fallbacks to local modules | Remove fallbacks and require explicit URLs? |
| Split-service production deploy | Docs show gateway-first, dev uses split-service | Align production with dev topology? |
| Split-service deployment docs | Deployment docs are still partial compared with the dev topology | Rewrite deploy docs around split services? |

**Status**: Document current behavior. Do not actively remove until topology
decisions are finalized.

## Detailed Surface Documentation

### Gateway-First Production Example

**Files**:
- `scripts/run_prod.sh` - Production launch script
- `deploy/systemd/evotraders.service` - systemd unit
- `deploy/nginx/bigtime.cillinn.com.conf` - HTTPS + WebSocket proxy
- `deploy/nginx/bigtime.cillinn.com.http.conf` - HTTP variant

**Behavior**:
```bash
# scripts/run_prod.sh launches:
python3 -m backend.main \
  --mode live \
  --config-name production \
  --host 127.0.0.1 \
  --port 8765
```

**nginx proxies**:
- `/ws` -> `127.0.0.1:8765` (WebSocket upgrade)
- `/` -> static files in `/var/www/bigtime/current`

**Why this exists**:
- Simpler production deployment (single process + nginx)
- WebSocket is the practical live event contract for frontend
- Split-service topology adds operational complexity not needed for all deployments

**Ownership**: ops-team
**Status**: Stable and intentional

### Dashboard Export Layer

**Files**: `runs/<run_id>/team_dashboard/*.json`
@@ -154,19 +123,14 @@ These remain in the tree, but they should not define the architecture for new wo
- treat these as historical or compatibility-oriented data/layout artifacts
- do not use them as the default runtime contract for new features

### Mixed `workspace_id` semantics on agent routes
### Historical mixed `workspace_id` semantics on agent routes

- `/api/workspaces/{workspace_id}/agents/...`
This compatibility shape has been removed from runtime agent routes.

**Read**:
**Current rule**:

- design-time CRUD routes use `workspace_id` as a registry workspace id
- profile, skills, and editable file routes use `workspace_id` as a run id

**Mitigation already in repo**:

- `agent_service /api/status` exposes scope metadata
- runtime-read responses expose `scope_type` and `scope_note`
- design-time CRUD routes use `/api/workspaces/{workspace_id}/agents/...`
- runtime agent assets use `/api/runs/{run_id}/agents/...`

### Partial EvoAgent rollout

@@ -175,8 +139,10 @@ These remain in the tree, but they should not define the architecture for new wo

**Read**:

- EvoAgent is still a controlled rollout path
- legacy analyst/risk/PM implementations remain the default runtime path for now
- EvoAgent is the default selection path for supported roles in the current
  pipeline
- legacy implementations remain as compatibility fallbacks in selected startup
  and runner paths

## Recommended Usage

@@ -185,13 +151,14 @@ When in doubt:
1. trust `docs/current-architecture.md`
2. trust `runs/<run_id>/` over root-level runtime directories
3. treat `workspaces/` as control-plane registry, not runtime truth
4. treat deploy artifacts as the current checked-in example, not the full system contract
4. treat deploy artifacts as partial references, not the full system contract
5. check this file's **Compatibility Surface Classification** before assuming something is legacy
6. prefer `runtime_service`-managed startup for all new work

## Change Log

| Date | Change |
|------|--------|
| 2026-03-31 | Added Compatibility Surface Classification (3 buckets) |
| 2026-03-31 | Documented OpenClaw dual integration (REST vs WebSocket) |
| 2026-03-31 | Clarified OpenClaw integration documentation |
| 2026-03-31 | Added ownership and status to all surfaces |

80
docs/project-layout.md
Normal file
@@ -0,0 +1,80 @@
# Project Layout

This file describes only the responsibilities of the current repository directories; it does not record the historical migration process.

If you want to know "how the system works now", read `current-architecture.md` first.
If you want to know "how to understand a directory today", read this file.

## Top-Level Directories

### Mainline Code

- `backend/`
  - backend runtime, services, APIs, Pipeline, Agents, tools, and tests
- `frontend/`
  - React/Vite frontend
- `shared/`
  - schemas and client wrappers shared between frontend and backend

### Active Data and State Directories

- `runs/`
  - runtime source of truth
  - each `run_id` holds BOOTSTRAP, agent assets, state, logs, and dashboard exports
- `workspaces/`
  - design-time registry
  - used for control-plane CRUD; not the default runtime execution directory
- `data/`
  - data assets and local data files used by the project

### Documentation and Deployment

- `docs/`
  - currently maintained architecture, compatibility boundaries, roadmap, and directory notes
- `services/`
  - service boundary notes
- `deploy/`
  - deployment examples, systemd and nginx configuration
- `scripts/`
  - startup, check, smoke-test, and helper scripts

### Project Entry Points and Metadata

- `README.md`
  - main English readme
- `README_zh.md`
  - main Chinese readme
- `pyproject.toml`
  - Python project metadata and dependencies
- `start-dev.sh`
  - default local development entry point
- `start.sh`
  - production-style local startup entry point

## Local Environment Directories

These directories are usually only meaningful on the current machine and should not be treated as architectural sources of truth:

- `.venv/`
  - local Python virtual environment
- `.pydeps/`
  - local dependency staging directory
- `.omc/`
  - local tooling state
- `.codex/`
  - local agent/tooling state

## Reading Principles

- For runtime behavior, start from `backend/`, `frontend/`, and `runs/`
- For control-plane editing and design-time assets, start from `workspaces/`
- For service boundaries, start from `services/README.md`
- When reading directories, do not treat local environment directories as part of the project structure
- Before adding a directory, decide which category it belongs to: mainline code, runtime data, docs/deployment, or local environment

## Current Constraints

- No new reference repos, mirror repos, or sample snapshot directories at the top level
- Do not commit test output, PID files, build artifacts, or cache directories
- New runtime-related files go under `runs/<run_id>/` first
- New design-time registry files go under `workspaces/` first
@@ -1,8 +1,26 @@
# Runtime Service API Change Log
# Runtime Service API Notes

## Overview

This document describes improvements to the `runtime_service` API, including new endpoints, enhanced response fields, and improved error handling.
This document describes the key endpoints, enhanced response fields, and startup/health-check behavior of the current `runtime_service` API.
It should be read as supplementary notes on the runtime API, not as a one-off change record from the migration.

## Scope

- runtime control plane: `backend.apps.runtime_service`
- runtime API prefix: `/api/runtime/*`
- recommended companion reading:
  - `README.md`
  - `README_zh.md`
  - `docs/current-architecture.md`

## Current Convention for `schedule_mode`

The public convention is now `daily` / `interval`, normalized as sketched below.

- `interval` is the current public name for intraday polling
- `intraday` is kept only as a backward-compatible input alias
- runtime API responses and written-back configs should prefer `interval`
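A minimal sketch of the server-side normalization described above; the function name is hypothetical:

```python
def normalize_schedule_mode(value: str) -> str:
    """Map the public and legacy schedule_mode inputs onto the public names."""
    mode = value.strip().lower()
    if mode == "intraday":  # backward-compatible input alias
        return "interval"
    if mode not in {"daily", "interval"}:
        raise ValueError(f"unsupported schedule_mode: {value!r}")
    return mode
```
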
## New Endpoints

@@ -26,7 +44,8 @@
- `mode`: run mode, `"live"` (live trading) or `"backtest"`; `"stopped"` when the runtime is stopped
- `is_backtest`: whether this is backtest mode
- `run_id`: the task ID of the current run
- `schedule_mode`: schedule mode, `"daily"` or `"intraday"`
- `schedule_mode`: schedule mode field. Current public values are `daily` or `interval`;
  the historical input alias `intraday` is normalized server-side to `interval`.
- `is_running`: whether the Gateway is running

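A hedged example of reading the fields above; the response keys are as documented here, while the host and port are assumptions based on the `:8003` runtime service default:

```python
import httpx

status = httpx.get("http://localhost:8003/api/runtime/gateway/status").json()
if status["is_running"] and status["schedule_mode"] == "interval":
    print(f"run {status['run_id']} is polling intraday in {status['mode']} mode")
```
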
---

@@ -113,7 +132,7 @@

---

## Improved Endpoints
## Key Endpoint Notes

### GET /api/runtime/gateway/status

@@ -256,7 +275,8 @@ The following configuration is validated automatically before the Gateway starts:
- `start_date` must be earlier than `end_date`

6. **Schedule mode**
- `schedule_mode` must be `"daily"` or `"intraday"`
- the current publicly validated values are `"daily"` / `"interval"`
- the historical input `"intraday"` is normalized to `"interval"` for compatibility

**Validation failure response**:

@@ -51,19 +51,18 @@ Examples:

## Compatibility Rule

Some API paths and fields still use legacy names:
Some fields still use legacy names:

- `/api/workspaces/{workspace_id}/agents/...`
- `workspace_id` on approval records

When reading those surfaces:
Current API path rule:

- design-time CRUD routes use `workspace_id` literally
- runtime-read routes may use the same slot for `run_id`
- design-time CRUD uses `/api/workspaces/{workspace_id}/...`
- runtime agent assets use `/api/runs/{run_id}/agents/...`

For new code:

- prefer `runId` for runtime variables
- prefer `runId` for runtime variables and runtime API paths
- prefer `workspaceId` only for design-time registry flows

## UI Wording

@@ -24,7 +24,7 @@ That gives you:
- trading service at `http://localhost:8001`
- news service at `http://localhost:8002`
- runtime service at `http://localhost:8003/api/runtime`
- gateway WebSocket at `ws://localhost:8765`
- gateway WebSocket at `ws://localhost:8765` started directly by `start-dev.sh`

## Frontend Environment Variables

BIN frontend/public/media/0.png (new file, 107 KiB)
BIN frontend/public/media/1.png (new file, 115 KiB)
BIN frontend/public/media/10.png (new file, 141 KiB)
BIN frontend/public/media/11.png (new file, 146 KiB)
BIN frontend/public/media/2.png (new file, 117 KiB)
BIN frontend/public/media/3.png (new file, 140 KiB)
BIN frontend/public/media/4.png (new file, 147 KiB)
BIN frontend/public/media/5.png (new file, 106 KiB)
BIN frontend/public/media/6.png (new file, 124 KiB)
BIN frontend/public/media/7.png (new file, 117 KiB)
BIN frontend/public/media/8.png (new file, 151 KiB)
BIN frontend/public/media/9.png (new file, 150 KiB)
@@ -8,12 +8,17 @@ import { useFeedProcessor } from './hooks/useFeedProcessor';
import { useRuntimeControls } from './hooks/useRuntimeControls';
import { useStockDataRequests } from './hooks/useStockDataRequests';
import { useWebSocketConnection } from './hooks/useWebSocketConnection';
import { fetchRuntimeLogs } from './services/runtimeApi';
import { fetchRuntimeAgents, fetchRuntimeLogs } from './services/runtimeApi';
import { useAgentRunFileState, useAgentStore } from './store/agentStore';
import { useMarketStore } from './store/marketStore';
import { usePortfolioStore } from './store/portfolioStore';
import { useRuntimeStore } from './store/runtimeStore';
import { useUIStore } from './store/uiStore';
import {
  buildRuntimeAgentMeta,
  findAgentByIdOrName,
  sortRuntimeAgents,
} from './utils/agentDisplay';

const EDITABLE_AGENT_WORKSPACE_FILES = [
  'SOUL.md',
@@ -174,21 +179,57 @@ export default function LiveTradingApp() {
  const [isRuntimeLogsLoading, setIsRuntimeLogsLoading] = useState(false);
  const [runtimeLogsPayload, setRuntimeLogsPayload] = useState(null);
  const [runtimeLogsError, setRuntimeLogsError] = useState(null);
  const [runtimeAgents, setRuntimeAgents] = useState([]);
  const agentFeedRef = useRef(null);
  const isSocketReady = isConnected && connectionStatus === 'connected';

  const selectedAgentId = selectedSkillAgentId || AGENTS[0]?.id || null;
  const resolvedAgents = useMemo(() => {
    if (!Array.isArray(runtimeAgents) || runtimeAgents.length === 0) {
      return AGENTS;
    }

    return sortRuntimeAgents(runtimeAgents).map((agentState, index) => {
      const agentId = String(agentState?.agent_id || agentState?.id || '').trim();
      const base = buildRuntimeAgentMeta(agentId, index);
      const displayName = typeof agentState?.display_name === 'string' ? agentState.display_name.trim() : '';
      return {
        ...base,
        id: agentId,
        name: displayName || base.name,
        runtimeStatus: agentState?.status || null,
        lastSession: agentState?.last_session || null,
        lastUpdated: agentState?.last_updated || null,
      };
    }).filter((agent) => agent.id);
  }, [runtimeAgents]);

  const selectedAgentId = selectedSkillAgentId || resolvedAgents[0]?.id || null;
  const selectedAgentProfile = selectedAgentId ? (agentProfilesByAgent[selectedAgentId] || null) : null;
  const selectedAgentSkills = selectedAgentId ? (agentSkillsByAgent[selectedAgentId] || []) : [];
  const selectedRunFileContent = selectedAgentId && selectedRunFile
    ? (runFilesByAgent[selectedAgentId]?.[selectedRunFile] || '')
    : '';

  useEffect(() => {
    if (!selectedSkillAgentId && AGENTS.length > 0) {
      setSelectedSkillAgentId(AGENTS[0].id);
  const loadRuntimeAgentsList = useCallback(async () => {
    try {
      const payload = await fetchRuntimeAgents();
      setRuntimeAgents(Array.isArray(payload?.agents) ? payload.agents : []);
    } catch {
      setRuntimeAgents([]);
    }
  }, [selectedSkillAgentId, setSelectedSkillAgentId]);
  }, []);

  useEffect(() => {
    if (!selectedSkillAgentId && resolvedAgents.length > 0) {
      setSelectedSkillAgentId(resolvedAgents[0].id);
    }
  }, [resolvedAgents, selectedSkillAgentId, setSelectedSkillAgentId]);

  useEffect(() => {
    if (selectedSkillAgentId && !resolvedAgents.some((agent) => agent.id === selectedSkillAgentId)) {
      setSelectedSkillAgentId(resolvedAgents[0]?.id || null);
    }
  }, [resolvedAgents, selectedSkillAgentId, setSelectedSkillAgentId]);

  useEffect(() => {
    if (!selectedRunFile) {
@@ -196,6 +237,37 @@ export default function LiveTradingApp() {
    }
  }, [selectedRunFile, setSelectedWorkspaceFile]);

  useEffect(() => {
    void loadRuntimeAgentsList();
  }, [loadRuntimeAgentsList]);

  useEffect(() => {
    const handleRuntimeAgentsUpdated = () => {
      void loadRuntimeAgentsList();
    };
    window.addEventListener('runtime-agents-updated', handleRuntimeAgentsUpdated);
    return () => {
      window.removeEventListener('runtime-agents-updated', handleRuntimeAgentsUpdated);
    };
  }, [loadRuntimeAgentsList]);

  useEffect(() => {
    if (!isSocketReady) {
      return;
    }
    void loadRuntimeAgentsList();
  }, [isSocketReady, loadRuntimeAgentsList]);

  useEffect(() => {
    if (!selectedAgentId || !selectedRunFile) {
      setRunDraftContent('');
      return;
    }

    const cachedContent = runFilesByAgent[selectedAgentId]?.[selectedRunFile];
    setRunDraftContent(typeof cachedContent === 'string' ? cachedContent : '');
  }, [runFilesByAgent, selectedAgentId, selectedRunFile, setRunDraftContent]);

  useEffect(() => {
    if (!isSocketReady || !selectedAgentId || !clientRef.current) {
      return;
@@ -233,7 +305,7 @@ export default function LiveTradingApp() {
      return;
    }

    AGENTS.forEach((agent) => {
    resolvedAgents.forEach((agent) => {
      if (!agent?.id) {
        return;
      }
@@ -246,6 +318,7 @@ export default function LiveTradingApp() {
    clientRef,
    isSocketReady,
    requestAgentProfile,
    resolvedAgents,
  ]);

  useEffect(() => {
@@ -326,13 +399,13 @@ export default function LiveTradingApp() {
  const bubbleFor = useCallback((idOrName) => {
    let bubble = bubbles[idOrName];
    if (bubble) return bubble;
    const agent = AGENTS.find((item) => item.name === idOrName || item.id === idOrName);
    const agent = findAgentByIdOrName(resolvedAgents, idOrName);
    if (agent) {
      bubble = bubbles[agent.id];
      if (bubble) return bubble;
    }
    return null;
  }, [bubbles]);
  }, [bubbles, resolvedAgents]);

  const handleManualTrigger = useCallback(() => {
    if (!isSocketReady || !clientRef.current) {
@@ -361,7 +434,7 @@ export default function LiveTradingApp() {
  }, []);

  const agentRequests = {
    agents: AGENTS,
    agents: resolvedAgents,
    agentProfilesByAgent,
    agentSkillsByAgent,
    runFilesByAgent,

@@ -1,9 +1,10 @@
import React, { useState, useRef, useImperativeHandle, forwardRef } from 'react';
import { formatTime } from '../utils/formatters';
import { MESSAGE_COLORS, getAgentColors, AGENTS, ASSETS } from '../config/constants';
import { MESSAGE_COLORS, getAgentColors, ASSETS } from '../config/constants';
import { getModelIcon } from '../utils/modelIcons';
import MarkdownModal from './MarkdownModal';
import LobeModelLogo from './LobeModelLogo.jsx';
import { findAgentByIdOrName, humanizeAgentId } from '../utils/agentDisplay';

const isAnalyst = (agentId, agentName) => {
  if (agentId && agentId.includes('analyst')) return true;
@@ -36,7 +37,7 @@ const stripMarkdown = (text) => {
    .replace(/^[-=]+$/gm, '');
};

const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref) => {
const AgentFeed = forwardRef(({ agents = [], feed, leaderboard, agentProfilesByAgent }, ref) => {
  const feedContentRef = useRef(null);
  const [highlightedId, setHighlightedId] = useState(null);
  const [selectedAgent, setSelectedAgent] = useState('all');
@@ -62,7 +63,7 @@ const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref)
  // Get agent info by name
  const getAgentInfoByName = (agentName) => {
    if (!agentName) return null;
    const agentConfig = AGENTS.find((agent) => agent.name === agentName);
    const agentConfig = findAgentByIdOrName(agents, agentName);
    const profile = agentConfig ? agentProfilesByAgent?.[agentConfig.id] : null;
    if (agentConfig && profile?.model_name) {
      return {
@@ -81,7 +82,7 @@ const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref)
    };
  };

  // Get unique agent names from feed (only registered agents in AGENTS)
  // Get unique agent names from feed using the current runtime agent list.
  const getUniqueAgents = () => {
    const agentNamesInFeed = new Set();

@@ -98,9 +99,10 @@ const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref)
      }
    });

    // Filter to only include registered agents and sort by AGENTS array order
    const registeredAgentNames = AGENTS.map(a => a.name);
    return registeredAgentNames.filter(name => agentNamesInFeed.has(name));
    const orderedRuntimeNames = agents.map((agent) => agent.name);
    const knownNames = orderedRuntimeNames.filter(name => agentNamesInFeed.has(name));
    const extraNames = [...agentNamesInFeed].filter(name => !orderedRuntimeNames.includes(name));
    return [...knownNames, ...extraNames];
  };

  // Filter feed based on selected agent
@@ -177,6 +179,12 @@ const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref)

  const currentSelection = getCurrentSelectionInfo();

  const resolveAgentDisplayName = (name, agentId) => {
    if (name) return name;
    const agent = findAgentByIdOrName(agents, agentId);
    return agent?.name || humanizeAgentId(agentId);
  };

  return (
    <div className="agent-feed">
      <div className="agent-feed-header">
@@ -241,7 +249,7 @@ const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref)
              type="color"
            />
          )}
          <span>{agent}</span>
          <span>{resolveAgentDisplayName(agent, agentInfo?.agentId)}</span>
        </div>
      );
    })}
@@ -255,7 +263,7 @@ const AgentFeed = forwardRef(({ feed, leaderboard, agentProfilesByAgent }, ref)
      <div className="empty-state">
        {selectedAgent === 'all'
          ? '等待系统更新...'
          : `${selectedAgent} 没有消息`}
          : `${resolveAgentDisplayName(selectedAgent, currentSelection.agentInfo?.agentId)} 没有消息`}
      </div>
    )}

@@ -3,7 +3,6 @@ import GlobalStyles from '../styles/GlobalStyles';
import GlobalStyles from '../styles/GlobalStyles';
import Header from './Header.jsx';
import RuntimeSettingsPanel from './RuntimeSettingsPanel.jsx';
import NetValueChart from './NetValueChart.jsx';
import { AGENTS } from '../config/constants';
import { useRuntimeStore } from '../store/runtimeStore';
import { useUIStore } from '../store/uiStore';
import { formatNumber, formatTickerPrice } from '../utils/formatters';
@@ -401,6 +400,7 @@ export default function AppShell({
  <div className="view-panel">
    <Suspense fallback={<ViewLoadingFallback label="加载交易室..." />}>
      <RoomView
        agents={agentRequests.agents}
        bubbles={bubbles}
        bubbleFor={bubbleFor}
        leaderboard={leaderboard}
@@ -501,7 +501,7 @@ export default function AppShell({
  {/* Right Panel: Agent Feed */}
  <div className="right-panel" style={{ width: `${100 - leftWidth}%` }}>
    <Suspense fallback={<ViewLoadingFallback label="加载消息流..." />}>
      <AgentFeed ref={agentFeedRef} feed={feed} leaderboard={leaderboard} agentProfilesByAgent={agentProfilesByAgent} />
      <AgentFeed ref={agentFeedRef} agents={agentRequests.agents} feed={feed} leaderboard={leaderboard} agentProfilesByAgent={agentProfilesByAgent} />
    </Suspense>
  </div>
</div>

@@ -1,8 +1,9 @@
import React, { useEffect, useMemo, useRef, useState, useCallback } from 'react';
import { ASSETS, SCENE_NATIVE, AGENT_SEATS, AGENTS } from '../config/constants';
import { ASSETS, SCENE_NATIVE, AGENT_SEATS } from '../config/constants';
import AgentCard from './AgentCard';
import { getModelIcon } from '../utils/modelIcons';
import LobeModelLogo from './LobeModelLogo.jsx';
import { findAgentByIdOrName } from '../utils/agentDisplay';

/**
 * Custom hook to load an image
@@ -48,7 +49,22 @@ function getRankMedal(rank) {
 * Supports click and hover (1.5s) to show agent performance cards
 * Supports replay mode - completely independent from live mode
 */
export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfilesByAgent, feed, onJumpToMessage, onOpenLaunchConfig }) {
function getSeatPosition(index) {
  if (AGENT_SEATS[index]) {
    return AGENT_SEATS[index];
  }

  const overflowIndex = index - AGENT_SEATS.length;
  const columns = 3;
  const row = Math.floor(overflowIndex / columns);
  const column = overflowIndex % columns;
  return {
    x: 0.18 + (column * 0.18),
    y: Math.max(0.14, 0.22 - (row * 0.1)),
  };
}

export default function RoomView({ agents = [], bubbles, bubbleFor, leaderboard, agentProfilesByAgent, feed, onJumpToMessage, onOpenLaunchConfig }) {
  const canvasRef = useRef(null);
  const containerRef = useRef(null);

@@ -152,16 +168,16 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
  // Determine which agents are speaking
  const speakingAgents = useMemo(() => {
    const speaking = {};
    AGENTS.forEach(agent => {
    agents.forEach(agent => {
      const bubble = bubbleFor(agent.name);
      speaking[agent.id] = !!bubble;
    });
    return speaking;
  }, [bubbles, bubbleFor]);
  }, [agents, bubbleFor, bubbles]);

  // Find agent data from leaderboard
  const getAgentData = (agentId) => {
    const agent = AGENTS.find(a => a.id === agentId);
    const agent = agents.find(a => a.id === agentId);
    if (!agent) return null;
    const profile = agentProfilesByAgent?.[agentId] || null;

@@ -195,7 +211,7 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
    };
  }

  // Merge data but preserve the correct avatar from AGENTS config
  // Merge data but preserve the configured visual metadata from frontend.
  return {
    ...agent,
    ...leaderboardData,
@@ -317,10 +333,7 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
    // Skip system messages
    if (msg.agent === 'System') return;
    // Find matching agent
    const agent = AGENTS.find(a =>
      a.id === msg.agentId ||
      a.name === msg.agent
    );
    const agent = findAgentByIdOrName(agents, msg.agentId || msg.agent);
    if (agent) {
      messages.push({
        feedItemId: item.id,
@@ -333,10 +346,7 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
  } else if (item.type === 'conference' && item.data?.messages) {
    item.data.messages.forEach((msg, msgIndex) => {
      if (msg.agent === 'System') return;
      const agent = AGENTS.find(a =>
        a.id === msg.agentId ||
        a.name === msg.agent
      );
      const agent = findAgentByIdOrName(agents, msg.agentId || msg.agent);
      if (agent) {
        messages.push({
          feedItemId: item.id,
@@ -479,7 +489,7 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
  if (isReplaying) {
    // Find replay bubble for this agent
    const bubble = Object.values(replayBubbles).find(b => {
      const agent = AGENTS.find(a => a.id === b.agentId);
      const agent = agents.find(a => a.id === b.agentId);
      return agent && agent.name === agentName;
    });
    return bubble || null;
@@ -487,13 +497,13 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
    // Use normal bubbleFor function
    return bubbleFor(agentName);
  }
}, [isReplaying, replayBubbles, bubbleFor]);
}, [agents, isReplaying, replayBubbles, bubbleFor]);

return (
  <div className="room-view">
    {/* Agents Indicator Bar */}
    <div className="room-agents-indicator">
      {AGENTS.map((agent, index) => {
      {agents.map((agent, index) => {
        const rank = getAgentRank(agent.id);
        const medal = rank ? getRankMedal(rank) : null;
        const agentData = getAgentData(agent.id);
@@ -572,7 +582,7 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
      <canvas ref={canvasRef} className="room-canvas" />

      {/* Speech Bubbles */}
      {AGENTS.map((agent, idx) => {
      {agents.map((agent, idx) => {
        const bubble = getBubbleForAgent(agent.name);
        if (!bubble) return null;

@@ -581,7 +591,7 @@ export default function RoomView({ bubbles, bubbleFor, leaderboard, agentProfile
        // Check if bubble is hidden
        if (hiddenBubbles[bubbleKey]) return null;

        const pos = AGENT_SEATS[idx];
        const pos = getSeatPosition(idx);
        const scaledWidth = SCENE_NATIVE.width * scale;
        const scaledHeight = SCENE_NATIVE.height * scale;

@@ -382,7 +382,7 @@ export default function RuntimeSettingsPanel({
  }}
>
  <option value="daily">每日定时</option>
  <option value="intraday">盘中轮询</option>
  <option value="interval">间隔轮询</option>
</select>
</label>

@@ -3,6 +3,7 @@
|
||||
*/
|
||||
|
||||
const trimTrailingSlash = (value) => value.replace(/\/+$/, "");
|
||||
const mediaAsset = (filename) => `/media/${filename}`;
|
||||
const isLocalDevHost = () => {
|
||||
if (typeof window === "undefined") {
|
||||
return false;
|
||||
@@ -14,12 +15,12 @@ const isLocalDevHost = () => {
|
||||
// Centralized CDN asset URLs
|
||||
export const CDN_ASSETS = {
|
||||
companyRoom: {
|
||||
agent_1: "https://img.alicdn.com/imgextra/i4/O1CN01Lr7SOl1lSExV0tOwv_!!6000000004817-2-tps-370-320.png",
|
||||
agent_2: "https://img.alicdn.com/imgextra/i3/O1CN017Kb8cY1VQNUmuK47o_!!6000000002647-2-tps-368-312.png",
|
||||
agent_3: "https://img.alicdn.com/imgextra/i3/O1CN010Fp55w1YqtGpVjgsS_!!6000000003111-2-tps-370-320.png",
|
||||
agent_4: "https://img.alicdn.com/imgextra/i3/O1CN01VnUsML1Dkq6fHw3ks_!!6000000000255-2-tps-366-316.png",
|
||||
agent_5: "https://img.alicdn.com/imgextra/i4/O1CN01o0kCQw1kyvbulBSl7_!!6000000004753-2-tps-370-314.png",
|
||||
agent_6: "https://img.alicdn.com/imgextra/i2/O1CN01cLV0zl1FI6ULAunTp_!!6000000000463-2-tps-368-320.png",
|
||||
agent_1: mediaAsset("0.png"),
|
||||
agent_2: mediaAsset("1.png"),
|
||||
agent_3: mediaAsset("2.png"),
|
||||
agent_4: mediaAsset("3.png"),
|
||||
agent_5: mediaAsset("4.png"),
|
||||
agent_6: mediaAsset("5.png"),
|
||||
team_logo: "https://img.alicdn.com/imgextra/i2/O1CN01n2S8aV25hcZhhNH95_!!6000000007558-2-tps-616-700.png",
|
||||
reme_logo: "https://img.alicdn.com/imgextra/i2/O1CN01FhncuT1Tqp8LfCaft_!!6000000002434-2-tps-915-250.png",
|
||||
full_room_dark: "https://img.alicdn.com/imgextra/i2/O1CN014sOgzK28re5haGC3X_!!6000000007986-2-tps-1248-832.png",
|
||||
@@ -45,6 +46,14 @@ export const ASSETS = {
|
||||
remeLogo: CDN_ASSETS.companyRoom.reme_logo,
|
||||
};
|
||||
|
||||
export const NON_MANAGER_AVATAR_POOL = Array.from({ length: 10 }, (_, index) => (
|
||||
mediaAsset(`${index + 2}.png`)
|
||||
));
|
||||
|
||||
export const DYNAMIC_ANALYST_AVATAR_POOL = Array.from({ length: 6 }, (_, index) => (
|
||||
mediaAsset(`${index + 6}.png`)
|
||||
));
|
||||
|
||||
// Scene dimensions (actual image size)
|
||||
export const SCENE_NATIVE = { width: 1248, height: 832 };
|
||||
|
||||
@@ -154,6 +163,32 @@ export const WS_URL =
|
||||
? `${FALLBACK_WS_PROTOCOL}//${FALLBACK_WS_HOST}:8765`
|
||||
: `${FALLBACK_WS_PROTOCOL}//${FALLBACK_WS_HOST}${FALLBACK_WS_PORT}/ws`);
|
||||
|
||||
// Dynamic Team Management API
|
||||
const DEFAULT_DYNAMIC_TEAM_API_BASE = isLocalDevHost()
|
||||
? "http://localhost:8003/api/dynamic-team"
|
||||
: `${DEFAULT_CONTROL_API_BASE}/dynamic-team`;
|
||||
export const DYNAMIC_TEAM_API_BASE =
|
||||
trimTrailingSlash(import.meta.env.VITE_DYNAMIC_TEAM_API_BASE_URL || "") ||
|
||||
DEFAULT_DYNAMIC_TEAM_API_BASE;
|
||||
|
||||
// Dynamic Team API Endpoints
|
||||
export const DYNAMIC_TEAM_ENDPOINTS = {
|
||||
// Get all available analyst types (builtin + runtime registered)
|
||||
listTypes: () => `${DYNAMIC_TEAM_API_BASE}/types`,
|
||||
// Get personas from personas.yaml
|
||||
getPersonas: () => `${DYNAMIC_TEAM_API_BASE}/personas`,
|
||||
// Create a new analyst
|
||||
createAnalyst: (runId) => `${DYNAMIC_TEAM_API_BASE}/runs/${runId}/analysts`,
|
||||
// Clone an existing analyst
|
||||
cloneAnalyst: (runId) => `${DYNAMIC_TEAM_API_BASE}/runs/${runId}/analysts/clone`,
|
||||
// Remove an analyst
|
||||
removeAnalyst: (runId, agentId) => `${DYNAMIC_TEAM_API_BASE}/runs/${runId}/analysts/${agentId}`,
|
||||
// Get analyst info
|
||||
getAnalystInfo: (runId, agentId) => `${DYNAMIC_TEAM_API_BASE}/runs/${runId}/analysts/${agentId}`,
|
||||
// Get team summary
|
||||
getTeamSummary: (runId) => `${DYNAMIC_TEAM_API_BASE}/runs/${runId}/summary`,
|
||||
};

// Initial ticker symbols for the production watchlist
export const INITIAL_TICKERS = [
{ symbol: "AAPL", price: null, change: null },
@@ -170,3 +205,190 @@ export const INITIAL_TICKERS = [
{ symbol: "COIN", price: null, change: null }
];

// ============================================
// Dynamic Analyst Team Management
// ============================================

/**
* Built-in analyst types that can be used as base for dynamic analysts
*
* IMPORTANT: When creating dynamic analysts, the agent_id MUST end with '_analyst'
* to receive analysis tool groups (fundamentals, technical, sentiment, valuation tools).
* Example: 'crypto_specialist_analyst' (correct) vs 'crypto_specialist' (incorrect)
*/
export const BUILTIN_ANALYST_TYPES = [
{
typeId: "fundamentals_analyst",
name: "基本面分析师", // "Fundamentals Analyst"
description: "Uses LLM to intelligently select analysis tools, focuses on financial data and company fundamental analysis",
icon: "fundamentals",
},
{
typeId: "technical_analyst",
name: "技术分析师", // "Technical Analyst"
description: "Uses LLM to intelligently select analysis tools, focuses on technical indicators and chart analysis",
icon: "technical",
},
{
typeId: "sentiment_analyst",
name: "情绪分析师", // "Sentiment Analyst"
description: "Uses LLM to intelligently select analysis tools, analyzes market sentiment and news sentiment",
icon: "sentiment",
},
{
typeId: "valuation_analyst",
name: "估值分析师", // "Valuation Analyst"
description: "Uses LLM to intelligently select analysis tools, focuses on company valuation and value assessment",
icon: "valuation",
},
];

/**
* Default colors for dynamically created analysts
* Cycles through these colors for new analysts
*/
export const DYNAMIC_ANALYST_COLORS = [
{ bg: "#F9FDFF", text: "#1565C0", accent: "#1565C0" }, // Blue
{ bg: "#FFF8F8", text: "#C62828", accent: "#C62828" }, // Red
{ bg: "#FAFFFA", text: "#2E7D32", accent: "#2E7D32" }, // Green
{ bg: "#FCFAFF", text: "#6A1B9A", accent: "#6A1B9A" }, // Purple
{ bg: "#FFFCF7", text: "#E65100", accent: "#E65100" }, // Orange
{ bg: "#F9FEFF", text: "#00838F", accent: "#00838F" }, // Cyan
{ bg: "#FFF9F5", text: "#D84315", accent: "#D84315" }, // Deep Orange
{ bg: "#F5F5FF", text: "#4527A0", accent: "#4527A0" }, // Deep Purple
];

/**
* Generate a color scheme for a dynamic analyst based on index
* @param {number} index - The index of the analyst
* @returns {Object} Color scheme object
*/
export const getDynamicAnalystColors = (index) => {
return DYNAMIC_ANALYST_COLORS[index % DYNAMIC_ANALYST_COLORS.length];
};
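The modulo means the ninth analyst wraps around to reuse an earlier palette entry; a quick trace:

```js
getDynamicAnalystColors(0); // -> Blue (0 % 8 === 0)
getDynamicAnalystColors(9); // -> Red  (9 % 8 === 1)
```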

/**
* Generate a default avatar URL for dynamic analysts
* Uses a hash of the agentId to select from available avatars
* @param {string} agentId - The agent ID
* @returns {string} Avatar URL
*/
export const getDynamicAnalystAvatar = (agentId) => {
const avatars = [
CDN_ASSETS.companyRoom.agent_1,
CDN_ASSETS.companyRoom.agent_2,
CDN_ASSETS.companyRoom.agent_3,
CDN_ASSETS.companyRoom.agent_4,
CDN_ASSETS.companyRoom.agent_5,
CDN_ASSETS.companyRoom.agent_6,
];
// Simple hash function to consistently map agentId to an avatar
const hash = agentId.split("").reduce((acc, char) => {
return acc + char.charCodeAt(0);
}, 0);
return avatars[hash % avatars.length];
};
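The hash is just the sum of character codes, so the mapping is deterministic across sessions. Traced by hand:

```js
// "abc" -> 97 + 98 + 99 = 294; 294 % 6 === 0, so it always maps to agent_1.
getDynamicAnalystAvatar("abc"); // -> CDN_ASSETS.companyRoom.agent_1
```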

/**
* Create a dynamic analyst configuration object
* @param {Object} config - Configuration object
* @param {string} config.agentId - Unique identifier
* @param {string} config.baseType - Base analyst type (e.g., "technical_analyst")
* @param {string} config.name - Display name
* @param {string[]} config.focus - Focus areas
* @param {string} config.description - Description
* @param {number} config.index - Index for color assignment
* @returns {Object} Complete agent configuration
*/
export const createDynamicAnalystConfig = ({
agentId,
baseType,
name,
focus = [],
description = "",
index = 0,
}) => {
return {
id: agentId,
name: name || agentId,
role: name || agentId,
baseType,
focus,
description,
avatar: getDynamicAnalystAvatar(agentId),
colors: getDynamicAnalystColors(index),
isDynamic: true,
isCustom: true,
};
};
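A usage sketch; the agent ID and focus values below are made up for illustration:

```js
const config = createDynamicAnalystConfig({
  agentId: "onchain_flow_analyst", // hypothetical ID, ends with '_analyst'
  baseType: "technical_analyst",
  name: "On-chain Flow Analyst",
  focus: ["BTC", "ETH"],
  index: 2,
});
// config.colors -> DYNAMIC_ANALYST_COLORS[2] (Green); config.isDynamic -> true
```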

/**
* Check if an agent is a dynamic analyst
* @param {Object} agent - Agent object
* @returns {boolean}
*/
export const isDynamicAnalyst = (agent) => {
// The underscore check is a loose fallback; it also matches builtin IDs like "portfolio_manager".
return agent?.isDynamic === true || agent?.id?.includes("_");
};
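Worth noting: the underscore fallback makes this check permissive. A quick illustration:

```js
isDynamicAnalyst({ isDynamic: true });         // true (explicit flag)
isDynamicAnalyst({ id: "portfolio_manager" }); // also true — the "_" fallback matches
isDynamicAnalyst({ id: "pm" });                // false
```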

/**
* Validate agent ID format for dynamic analysts
* @param {string} agentId - Agent ID to validate
* @returns {Object} Validation result
*/
export const validateAgentId = (agentId) => {
const errors = [];
const warnings = [];

if (!agentId) {
errors.push("Agent ID is required");
} else if (typeof agentId !== "string") {
errors.push("Agent ID must be a string");
} else {
if (agentId.length < 3) {
errors.push("Agent ID must be at least 3 characters");
}
if (agentId.length > 50) {
errors.push("Agent ID must be at most 50 characters");
}
if (!/^[a-zA-Z0-9_]+$/.test(agentId)) {
errors.push("Agent ID can only contain letters, numbers, and underscores");
}
// Reserved IDs that cannot be used
const reservedIds = ["portfolio_manager", "risk_manager"];
if (reservedIds.includes(agentId)) {
errors.push(`"${agentId}" is a reserved ID and cannot be used`);
}
// Warning: agent_id should end with '_analyst' to get analysis tools
if (!agentId.endsWith("_analyst")) {
warnings.push(
"Agent ID should end with '_analyst' to receive analysis tool groups"
);
}
}

return {
valid: errors.length === 0,
errors,
warnings,
};
};
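Two sample calls, traced through the checks above:

```js
validateAgentId("crypto_specialist");
// -> { valid: true, errors: [],
//      warnings: ["Agent ID should end with '_analyst' to receive analysis tool groups"] }
validateAgentId("pm");
// -> { valid: false, errors: ["Agent ID must be at least 3 characters"],
//      warnings: ["Agent ID should end with '_analyst' to receive analysis tool groups"] }
```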

/**
* Generate a suggested agent ID from a name
* IMPORTANT: Agent ID must end with '_analyst' to receive analysis tools
* @param {string} name - Display name
* @param {string} baseType - Base analyst type
* @returns {string} Suggested agent ID (guaranteed to end with '_analyst')
*/
export const suggestAgentId = (name, baseType) => {
const timestamp = Date.now().toString(36).slice(-4);
const normalized = name
.toLowerCase()
.replace(/[^a-z0-9\s]/g, "")
.replace(/\s+/g, "_")
.replace(/_analyst$/, "") // Remove '_analyst' suffix if present to avoid duplication
.slice(0, 20);
// Must end with '_analyst' to get analysis tools registered
return `${normalized || baseType}_${timestamp}_analyst`;
};
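Traced by hand; the timestamp suffix varies at runtime, shown here as a placeholder:

```js
suggestAgentId("Crypto Specialist", "technical_analyst");
// "Crypto Specialist" -> "crypto_specialist" -> "crypto_specialist_<ts>_analyst"
suggestAgentId("!!!", "technical_analyst");
// normalized is empty, so baseType is used -> "technical_analyst_<ts>_analyst"
```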

@@ -49,11 +49,24 @@ export function useAgentDataRequests(clientRef) {
return runId;
}, []);

const sendWs = useCallback((payload) => {
const client = clientRef.current;
if (!client) {
return false;
}
return client.send(payload);
}, [clientRef]);
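The new `sendWs` helper is the heart of this refactor: every callback below now tries the WebSocket first and, only when the socket is unavailable, falls back to the REST client and manages its own loading state. A condensed sketch of the shape each callback follows (`setLoading`, `fetchSomething`, and `storePayload` are generic placeholders, not part of the diff):

```js
// Sketch of the shared pattern, not the literal diff:
const requestSomething = useCallback((arg) => {
  setLoading(true);                                        // placeholder state setter
  if (sendWs({ type: 'get_something', arg })) return true; // WS path: reply arrives as a push message
  void resolveRunId()                                      // REST fallback when the socket is down
    .then((runId) => fetchSomething(runId, arg))           // placeholder REST client call
    .then((payload) => { storePayload(payload); setLoading(false); })
    .catch(() => setLoading(false));                       // no WS retry: sendWs already reported the socket down
  return true;
}, [resolveRunId, sendWs]);
```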

const requestAgentSkills = useCallback((agentId) => {
const normalized = typeof agentId === 'string' ? agentId.trim() : '';
if (!normalized) return false;
setIsAgentSkillsLoading(true);
setAgentSkillsFeedback(null);
const sent = sendWs({ type: 'get_agent_skills', agent_id: normalized });
if (sent) {
return true;
}

void resolveRunId()
.then((runId) => fetchAgentSkills(runId, normalized))
.then((payload) => {
@@ -61,22 +74,19 @@ export function useAgentDataRequests(clientRef) {
setIsAgentSkillsLoading(false);
})
.catch(() => {
if (!clientRef.current) {
setIsAgentSkillsLoading(false);
return;
}
console.debug('REST agent skills request failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'get_agent_skills', agent_id: normalized });
if (!success) {
setIsAgentSkillsLoading(false);
}
});
return true;
}, [clientRef, resolveRunId, setAgentSkillsByAgent, setIsAgentSkillsLoading, setAgentSkillsFeedback]);
}, [resolveRunId, sendWs, setAgentSkillsByAgent, setIsAgentSkillsLoading, setAgentSkillsFeedback]);

const requestAgentProfile = useCallback((agentId) => {
const normalized = typeof agentId === 'string' ? agentId.trim() : '';
if (!normalized) return false;
const sent = sendWs({ type: 'get_agent_profile', agent_id: normalized });
if (sent) {
return true;
}

void resolveRunId()
.then((runId) => fetchAgentProfile(runId, normalized))
.then((payload) => {
@@ -85,20 +95,20 @@ export function useAgentDataRequests(clientRef) {
[normalized]: payload?.profile && typeof payload.profile === 'object' ? payload.profile : {}
}));
})
.catch(() => {
if (clientRef.current) {
console.debug('REST agent profile request failed, falling back to websocket compatibility path');
clientRef.current.send({ type: 'get_agent_profile', agent_id: normalized });
}
});
.catch(() => {});
return true;
}, [clientRef, resolveRunId, setAgentProfilesByAgent]);
}, [resolveRunId, sendWs, setAgentProfilesByAgent]);

const requestSkillDetail = useCallback((skillName) => {
const normalized = typeof skillName === 'string' ? skillName.trim() : '';
if (!normalized) return false;
const detailKey = `${selectedSkillAgentId}:${normalized}`;
setSkillDetailLoadingKey(detailKey);
const sent = sendWs({ type: 'get_skill_detail', agent_id: selectedSkillAgentId, skill_name: normalized });
if (sent) {
return true;
}

void resolveRunId()
.then((runId) => fetchAgentSkillDetail(runId, selectedSkillAgentId, normalized))
.then((payload) => {
@@ -110,18 +120,10 @@ export function useAgentDataRequests(clientRef) {
setSkillDetailLoadingKey(null);
})
.catch(() => {
if (!clientRef.current) {
setSkillDetailLoadingKey(null);
return;
}
console.debug('REST skill detail request failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'get_skill_detail', agent_id: selectedSkillAgentId, skill_name: normalized });
if (!success) {
setSkillDetailLoadingKey(null);
}
});
return true;
}, [clientRef, resolveRunId, selectedSkillAgentId, setSkillDetailLoadingKey, setSkillDetailsByName]);
}, [resolveRunId, selectedSkillAgentId, sendWs, setSkillDetailLoadingKey, setSkillDetailsByName]);

const handleCreateLocalSkill = useCallback((skillName) => {
const normalized = typeof skillName === 'string' ? skillName.trim() : '';
@@ -131,6 +133,11 @@ export function useAgentDataRequests(clientRef) {
}
setAgentSkillsSavingKey(`${selectedSkillAgentId}:${normalized}:create`);
setAgentSkillsFeedback(null);
const sent = sendWs({ type: 'create_agent_local_skill', agent_id: selectedSkillAgentId, skill_name: normalized });
if (sent) {
return;
}

void resolveRunId()
.then((runId) => createAgentLocalSkill(runId, selectedSkillAgentId, normalized))
.then(() => {
@@ -140,19 +147,10 @@ export function useAgentDataRequests(clientRef) {
requestSkillDetail(normalized);
})
.catch(() => {
if (!clientRef.current) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); // "Connection not ready, retry later"
return;
}
console.debug('REST local skill create failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'create_agent_local_skill', agent_id: selectedSkillAgentId, skill_name: normalized });
if (!success) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); // "Send failed, check the connection"
}
});
}, [clientRef, requestAgentSkills, requestSkillDetail, resolveRunId, selectedSkillAgentId, setAgentSkillsFeedback, setAgentSkillsSavingKey]);
}, [requestAgentSkills, requestSkillDetail, resolveRunId, selectedSkillAgentId, sendWs, setAgentSkillsFeedback, setAgentSkillsSavingKey]);

const handleLocalSkillDraftChange = useCallback((skillName, content) => {
const detailKey = `${selectedSkillAgentId}:${skillName}`;
@@ -165,6 +163,11 @@ export function useAgentDataRequests(clientRef) {
if (typeof content !== 'string') return;
setAgentSkillsSavingKey(`${selectedSkillAgentId}:${skillName}:content`);
setAgentSkillsFeedback(null);
const sent = sendWs({ type: 'update_agent_local_skill', agent_id: selectedSkillAgentId, skill_name: skillName, content });
if (sent) {
return;
}

void resolveRunId()
.then((runId) => updateAgentLocalSkill(runId, selectedSkillAgentId, skillName, content))
.then(() => {
@@ -173,23 +176,19 @@ export function useAgentDataRequests(clientRef) {
requestSkillDetail(skillName);
})
.catch(() => {
if (!clientRef.current) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' });
return;
}
console.debug('REST local skill save failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'update_agent_local_skill', agent_id: selectedSkillAgentId, skill_name: skillName, content });
if (!success) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' });
}
});
}, [clientRef, localSkillDraftsByKey, requestSkillDetail, resolveRunId, selectedSkillAgentId, setAgentSkillsFeedback, setAgentSkillsSavingKey]);
}, [localSkillDraftsByKey, requestSkillDetail, resolveRunId, selectedSkillAgentId, sendWs, setAgentSkillsFeedback, setAgentSkillsSavingKey]);

const handleLocalSkillDelete = useCallback((skillName) => {
setAgentSkillsSavingKey(`${selectedSkillAgentId}:${skillName}:delete`);
setAgentSkillsFeedback(null);
const sent = sendWs({ type: 'delete_agent_local_skill', agent_id: selectedSkillAgentId, skill_name: skillName });
if (sent) {
return;
}

void resolveRunId()
.then((runId) => deleteAgentLocalSkill(runId, selectedSkillAgentId, skillName))
.then(() => {
@@ -198,23 +197,19 @@ export function useAgentDataRequests(clientRef) {
requestAgentSkills(selectedSkillAgentId);
})
.catch(() => {
if (!clientRef.current) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' });
return;
}
console.debug('REST local skill delete failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'delete_agent_local_skill', agent_id: selectedSkillAgentId, skill_name: skillName });
if (!success) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' });
}
});
}, [clientRef, requestAgentSkills, resolveRunId, selectedSkillAgentId, setAgentSkillsFeedback, setAgentSkillsSavingKey]);
}, [requestAgentSkills, resolveRunId, selectedSkillAgentId, sendWs, setAgentSkillsFeedback, setAgentSkillsSavingKey]);

const handleRemoveSharedSkill = useCallback((skillName) => {
setAgentSkillsSavingKey(`${selectedSkillAgentId}:${skillName}:remove`);
setAgentSkillsFeedback(null);
const sent = sendWs({ type: 'remove_agent_skill', agent_id: selectedSkillAgentId, skill_name: skillName });
if (sent) {
return;
}

void resolveRunId()
.then((runId) => disableAgentSkill(runId, selectedSkillAgentId, skillName))
.then(() => {
@@ -223,24 +218,20 @@ export function useAgentDataRequests(clientRef) {
requestAgentSkills(selectedSkillAgentId);
})
.catch(() => {
if (!clientRef.current) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' });
return;
}
console.debug('REST shared skill remove failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'remove_agent_skill', agent_id: selectedSkillAgentId, skill_name: skillName });
if (!success) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' });
}
});
}, [clientRef, requestAgentSkills, resolveRunId, selectedSkillAgentId, setAgentSkillsFeedback, setAgentSkillsSavingKey]);
}, [requestAgentSkills, resolveRunId, selectedSkillAgentId, sendWs, setAgentSkillsFeedback, setAgentSkillsSavingKey]);

const handleAgentSkillToggle = useCallback((skillName, enabled) => {
const agentId = selectedSkillAgentId;
setAgentSkillsSavingKey(`${agentId}:${skillName}`);
setAgentSkillsFeedback(null);
const sent = sendWs({ type: 'update_agent_skill', agent_id: agentId, skill_name: skillName, enabled });
if (sent) {
return;
}

void resolveRunId()
.then((runId) => enabled
? enableAgentSkill(runId, agentId, skillName)
@@ -251,19 +242,10 @@ export function useAgentDataRequests(clientRef) {
requestAgentSkills(agentId);
})
.catch(() => {
if (!clientRef.current) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' });
return;
}
console.debug('REST skill toggle failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'update_agent_skill', agent_id: agentId, skill_name: skillName, enabled });
if (!success) {
setAgentSkillsSavingKey(null);
setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' });
}
});
}, [clientRef, requestAgentSkills, resolveRunId, selectedSkillAgentId, setAgentSkillsFeedback, setAgentSkillsSavingKey]);
}, [requestAgentSkills, resolveRunId, selectedSkillAgentId, sendWs, setAgentSkillsFeedback, setAgentSkillsSavingKey]);

const handleSkillAgentChange = useCallback((agentId) => {
setSelectedSkillAgentId(agentId);
@@ -278,6 +260,11 @@ export function useAgentDataRequests(clientRef) {
if (!normalizedAgentId || !normalizedFilename) return false;
setIsWorkspaceFileLoading(true);
setWorkspaceFileFeedback(null);
const sent = sendWs({ type: 'get_agent_workspace_file', agent_id: normalizedAgentId, filename: normalizedFilename });
if (sent) {
return true;
}

void resolveRunId()
.then((runId) => fetchAgentWorkspaceFile(runId, normalizedAgentId, normalizedFilename))
.then((payload) => {
@@ -292,18 +279,10 @@ export function useAgentDataRequests(clientRef) {
setIsWorkspaceFileLoading(false);
})
.catch(() => {
if (!clientRef.current) {
setIsWorkspaceFileLoading(false);
return;
}
console.debug('REST workspace file read failed, falling back to websocket compatibility path');
const success = clientRef.current.send({ type: 'get_agent_workspace_file', agent_id: normalizedAgentId, filename: normalizedFilename });
if (!success) {
setIsWorkspaceFileLoading(false);
}
});
return true;
}, [clientRef, resolveRunId, setIsWorkspaceFileLoading, setWorkspaceDraftContent, setWorkspaceFileFeedback, setWorkspaceFilesByAgent]);
}, [resolveRunId, sendWs, setIsWorkspaceFileLoading, setWorkspaceDraftContent, setWorkspaceFileFeedback, setWorkspaceFilesByAgent]);

const handleWorkspaceFileChange = useCallback((filename) => {
useAgentStore.getState().setSelectedWorkspaceFile(filename);
@@ -314,6 +293,16 @@ export function useAgentDataRequests(clientRef) {
const key = `${selectedSkillAgentId}:${selectedWorkspaceFile}`;
setWorkspaceFileSavingKey(key);
setWorkspaceFileFeedback(null);
const sent = sendWs({
type: 'update_agent_workspace_file',
agent_id: selectedSkillAgentId,
filename: selectedWorkspaceFile,
content: workspaceDraftContent
});
if (sent) {
return;
}

void resolveRunId()
.then((runId) => updateAgentWorkspaceFile(runId, selectedSkillAgentId, selectedWorkspaceFile, workspaceDraftContent))
.then((payload) => {
@@ -328,24 +317,10 @@ export function useAgentDataRequests(clientRef) {
}));
})
.catch(() => {
if (!clientRef.current) {
setWorkspaceFileSavingKey(null);
setWorkspaceFileFeedback({ type: 'error', text: '连接未就绪,稍后重试' });
return;
}
console.debug('REST workspace file save failed, falling back to websocket compatibility path');
const success = clientRef.current.send({
type: 'update_agent_workspace_file',
agent_id: selectedSkillAgentId,
filename: selectedWorkspaceFile,
content: workspaceDraftContent
});
if (!success) {
setWorkspaceFileSavingKey(null);
setWorkspaceFileFeedback({ type: 'error', text: '发送失败,请检查连接状态' });
}
});
}, [clientRef, resolveRunId, selectedSkillAgentId, selectedWorkspaceFile, setWorkspaceFileFeedback, setWorkspaceFileSavingKey, setWorkspaceFilesByAgent, workspaceDraftContent]);
}, [resolveRunId, selectedSkillAgentId, selectedWorkspaceFile, sendWs, setWorkspaceFileFeedback, setWorkspaceFileSavingKey, setWorkspaceFilesByAgent, workspaceDraftContent]);

const handleUploadExternalSkill = useCallback(async (file) => {
if (!(file instanceof File)) {
||||