chore: remove legacy startup paths
.gitignore (6 lines changed, vendored)
@@ -51,11 +51,17 @@ node_modules
outputs/
/production/
/smoke_test/
/frontend/dist/
/frontend/test-results/

# Local tooling state
.omc/
/.pydeps/
/referance/
/.pids/
/.pytest_cache/
/.ruff_cache/
/evotraders.egg-info/

# Run outputs
/runs/

@@ -1 +0,0 @@
73343
@@ -1 +0,0 @@
73348
@@ -1 +0,0 @@
66939
@@ -1 +0,0 @@
73345
@@ -1 +0,0 @@
73347
@@ -1 +0,0 @@
73346
@@ -1 +0,0 @@
73344
README.md (43 lines changed)
@@ -21,8 +21,11 @@ The repository name still uses `evotraders`, but the product-facing branding now
**Multi-agent trading team**
Six roles collaborate like a real desk: four specialist analysts (fundamentals, technical, sentiment, valuation), one portfolio manager, and one risk manager.

**Continuous learning**
Agents can persist long-term memory with ReMe, reflect after each cycle, and evolve their decision patterns over time.
**Continuous learning & evolution**
Agents persist long-term memory with ReMe and reflect after each cycle. The **Autonomous Policy Optimizer (APO)** automatically tunes agent operational policies (`POLICY.md`) based on daily P&L feedback to correct recurring mistakes.

**Robust execution with recovery**
The trading pipeline supports **phase-based checkpointing**: if a run is interrupted, it can resume from the last successful phase (Analysis, Risk, Discussion, Decision, Execution, or Settlement), ensuring resilience in production.

**Backtest and live modes**
The same runtime model supports historical simulation and live execution with real-time market data.
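The phase-based checkpointing paragraph added above names the resumable phases, but this hunk shows no implementation. Below is a minimal sketch of what phase-level resume implies; every name in it (`PHASES`, `run_phase`, `checkpoint.json`) is hypothetical and not code from this commit.

```python
# Hypothetical sketch of phase-based checkpoint/resume; names are illustrative only.
import json
from pathlib import Path

PHASES = ["analysis", "risk", "discussion", "decision", "execution", "settlement"]

def run_phase(phase: str) -> None:
    print(f"running {phase}")  # stand-in for the real pipeline phase

def resume_run(run_dir: Path) -> None:
    ckpt = run_dir / "checkpoint.json"
    done = json.loads(ckpt.read_text())["completed"] if ckpt.exists() else []
    for phase in PHASES:
        if phase in done:
            continue  # finished before the interruption; skip on resume
        run_phase(phase)
        done.append(phase)
        ckpt.write_text(json.dumps({"completed": done}))  # persist after each phase
```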
@@ -68,6 +71,7 @@ Runtime state is stored in `runs/<run_id>/` — this is the **runtime source of

### Documentation

- [docs/README.md](./docs/README.md) — documentation index and reading order
- [docs/current-architecture.md](./docs/current-architecture.md) — canonical architecture facts
- [services/README.md](./services/README.md) — service boundaries and migration details
- [docs/current-architecture.excalidraw](./docs/current-architecture.excalidraw) — visual diagram
@@ -84,15 +88,11 @@ Runtime state is stored in `runs/<run_id>/` — this is the **runtime source of
# clone this repository, then:
cd evotraders

# backend runtime dependencies
uv pip install -r requirements.txt

# install package entrypoint in editable mode
uv pip install -e .

# optional
# optional dev dependencies
# uv pip install -e ".[dev]"
# pip install -e .
```

Frontend dependencies:
@@ -103,7 +103,7 @@ npm ci
cd ..
```

Production deployment should prefer `requirements.txt` for backend and `npm ci` for frontend so the pulled environment matches the checked-in lockfiles and version pins.
Production deployment should prefer the checked-in package metadata in `pyproject.toml` for the backend and `npm ci` for the frontend, so the pulled environment matches the checked-in dependency declarations and lockfiles.

### 2. Configure environment

@@ -206,13 +206,13 @@ Recommended local development flow:
./start-dev.sh
```

This starts:
The script starts these services directly:

- `agent_service` at `http://localhost:8000`
- `trading_service` at `http://localhost:8001`
- `news_service` at `http://localhost:8002`
- `runtime_service` at `http://localhost:8003`
- gateway WebSocket at `ws://localhost:8765`
- gateway WebSocket at `ws://localhost:8765`, started and managed by `runtime_service`

Then start the frontend in another terminal:

@@ -229,31 +229,34 @@ python -m uvicorn backend.apps.agent_service:app --host 0.0.0.0 --port 8000 --re
python -m uvicorn backend.apps.trading_service:app --host 0.0.0.0 --port 8001 --reload
python -m uvicorn backend.apps.news_service:app --host 0.0.0.0 --port 8002 --reload
python -m uvicorn backend.apps.runtime_service:app --host 0.0.0.0 --port 8003 --reload
# compatibility gateway path, not the recommended primary dev entrypoint
python -m backend.main --mode live --host 0.0.0.0 --port 8765

# then create a runtime so runtime_service can spawn the Gateway subprocess
curl -X POST http://localhost:8003/api/runtime/start \
-H "Content-Type: application/json" \
-d '{"launch_mode":"fresh","tickers":["AAPL","MSFT"],"mode":"live"}'
```
### 4. Run backtest or live mode

Backtest:

```bash
python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01
python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01 --enable-memory
curl -X POST http://localhost:8003/api/runtime/start \
-H "Content-Type: application/json" \
-d '{"launch_mode":"fresh","mode":"backtest","tickers":["AAPL","MSFT"],"start_date":"2025-11-01","end_date":"2025-12-01"}'
```

Live:

```bash
python backend/main.py --mode live --config-name live
python backend/main.py --mode live --config-name live --enable-memory
python backend/main.py --mode live --config-name live --interval-minutes 60
python backend/main.py --mode live --config-name live --trigger-time 22:30
curl -X POST http://localhost:8003/api/runtime/start \
-H "Content-Type: application/json" \
-d '{"launch_mode":"fresh","mode":"live","tickers":["AAPL","MSFT"]}'
```

Help:

```bash
python backend/main.py --help
python backend/main.py --help # compatibility standalone entrypoint only
```
### Offline backtest data

@@ -311,7 +314,7 @@ If these are not set, the frontend falls back to its local defaults and compatib

```text
Market data -> independent analyst work -> team communication -> portfolio decision ->
risk review -> execution/settlement -> reflection/memory update
risk review -> execution/settlement -> reflection/memory update -> APO policy tuning
```

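The `APO policy tuning` stage appended to this flow is only described at the feature level in this commit. The sketch below shows one way a daily pass could fold P&L feedback into `POLICY.md`; the file name comes from the README, but the function, the per-agent path layout, and the simple append step are assumptions standing in for the LLM-driven rewrite the README implies.

```python
# Illustrative APO-style daily policy update (hypothetical; the real optimizer is LLM-driven).
from pathlib import Path

def apo_tune_policy(run_dir: Path, agent_id: str, daily_pnl: float) -> None:
    policy_file = run_dir / "agents" / agent_id / "POLICY.md"  # assumed layout
    if daily_pnl >= 0:
        return  # only losing days trigger a corrective policy note
    note = f"\n- Daily P&L was {daily_pnl:.2f}; avoid repeating the decisions behind this loss.\n"
    with policy_file.open("a", encoding="utf-8") as f:
        f.write(note)  # the agent's next prompt picks up the updated policy
```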
The runtime manager also tracks:

README_zh.md (36 lines changed)
@@ -66,7 +66,9 @@ frontend -> runtime_service/control APIs -> gateway/runtime manager -> market se

### Documentation

- [docs/README.md](./docs/README.md) — documentation index and reading order
- [docs/current-architecture.md](./docs/current-architecture.md) — canonical architecture facts
- [docs/project-layout.md](./docs/project-layout.md) — current directory layout and responsibilities
- [services/README.md](./services/README.md) — service boundaries and migration details
- [docs/current-architecture.excalidraw](./docs/current-architecture.excalidraw) — architecture diagram
- [docs/development-roadmap.md](./docs/development-roadmap.md) — next-step execution plan
@@ -82,15 +84,11 @@ frontend -> runtime_service/control APIs -> gateway/runtime manager -> market se
# after cloning the repository, enter the project directory
cd evotraders

# install backend runtime dependencies
uv pip install -r requirements.txt

# install the project entrypoint (editable mode)
uv pip install -e .

# optional
# optional dev dependencies
# uv pip install -e ".[dev]"
# pip install -e .
```

Frontend dependencies:
@@ -101,7 +99,7 @@ npm ci
cd ..
```

For production deployment, the backend should use `requirements.txt` and the frontend `npm ci`, so the provisioned environment strictly follows the dependency versions locked in the repository.
For production deployment, the backend should be installed from the package metadata declared in `pyproject.toml` and the frontend with `npm ci`, so the provisioned environment strictly follows the dependencies declared and the versions locked in the repository.

### 2. Configure environment variables

@@ -178,7 +176,7 @@ python3 scripts/smoke_evo_runtime.py --agent-id fundamentals_analyst
- `trading_service`: `http://localhost:8001`
- `news_service`: `http://localhost:8002`
- `runtime_service`: `http://localhost:8003`
- gateway WebSocket: `ws://localhost:8765`
- gateway WebSocket: `ws://localhost:8765`, started and managed by `runtime_service`

Then start the frontend in another terminal:

@@ -195,8 +193,11 @@ python -m uvicorn backend.apps.agent_service:app --host 0.0.0.0 --port 8000 --re
python -m uvicorn backend.apps.trading_service:app --host 0.0.0.0 --port 8001 --reload
python -m uvicorn backend.apps.news_service:app --host 0.0.0.0 --port 8002 --reload
python -m uvicorn backend.apps.runtime_service:app --host 0.0.0.0 --port 8003 --reload
# compatibility gateway path, not the recommended primary dev entrypoint
python -m backend.main --mode live --host 0.0.0.0 --port 8765

# then create a runtime through runtime_service, which spawns the Gateway subprocess
curl -X POST http://localhost:8003/api/runtime/start \
-H "Content-Type: application/json" \
-d '{"launch_mode":"fresh","tickers":["AAPL","MSFT"],"mode":"live"}'
```

The `production` used by the deployment scripts in this repository is just an example run label; it should no longer be understood as
@@ -207,24 +208,19 @@ python -m backend.main --mode live --host 0.0.0.0 --port 8765
Backtest:

```bash
python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01
python backend/main.py --mode backtest --config-name smoke_fullstack --start-date 2025-11-01 --end-date 2025-12-01 --enable-memory
curl -X POST http://localhost:8003/api/runtime/start \
-H "Content-Type: application/json" \
-d '{"launch_mode":"fresh","mode":"backtest","tickers":["AAPL","MSFT"],"start_date":"2025-11-01","end_date":"2025-12-01"}'
```

Live:

```bash
python backend/main.py --mode live --config-name live
python backend/main.py --mode live --config-name live --enable-memory
python backend/main.py --mode live --config-name live --interval-minutes 60
python backend/main.py --mode live --config-name live --trigger-time 22:30
curl -X POST http://localhost:8003/api/runtime/start \
-H "Content-Type: application/json" \
-d '{"launch_mode":"fresh","mode":"live","tickers":["AAPL","MSFT"]}'
```

Help:

```bash
python backend/main.py --help
```
### Offline backtest data

If you just want to try backtesting quickly without depending on external market-data APIs, you can download the offline data package and extract it into `backend/data`:

@@ -1,46 +1,18 @@
# -*- coding: utf-8 -*-
"""
Agent API Routes

Provides REST API endpoints for both:

- design-time agent management under `workspaces/`
- run-scoped agent asset access under `runs/<run_id>/`
"""
"""Agent API routes for design-time workspace registry CRUD only."""
import logging
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, HTTPException, Depends, Body, UploadFile, File, Form
from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel, Field

from backend.agents import AgentFactory, get_registry
from backend.agents.workspace_manager import RunWorkspaceManager
from backend.agents.agent_workspace import load_agent_workspace_config
from backend.agents.skills_manager import SkillsManager
from backend.agents.toolkit_factory import load_agent_profiles
from backend.config.bootstrap_config import get_bootstrap_config_for_run
from backend.llm.models import get_agent_model_info

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/workspaces/{workspace_id}/agents", tags=["agents"])
DESIGN_SCOPE = "design_workspace"
RUNTIME_SCOPE = "runtime_run"
RUNTIME_SCOPE_NOTE = (
    "For profile, skills, and editable agent files, `workspace_id` is treated "
    "as the active run id under `runs/<run_id>/`, not as the design-time "
    "`workspaces/` registry."
)


def _runtime_scope_fields() -> dict[str, str]:
    return {
        "scope_type": RUNTIME_SCOPE,
        "scope_note": RUNTIME_SCOPE_NOTE,
    }


def _design_scope_fields() -> dict[str, str]:
@@ -65,26 +37,9 @@ class CreateAgentRequest(BaseModel):


class UpdateAgentRequest(BaseModel):
    """Request to update an agent."""
    """Request to update design-time agent metadata."""
    name: Optional[str] = None
    description: Optional[str] = None
    enabled_skills: Optional[List[str]] = None
    disabled_skills: Optional[List[str]] = None


class InstallExternalSkillRequest(BaseModel):
    """Request to install an external skill for one agent."""
    source: str = Field(..., description="Directory path, zip path, or http(s) zip URL")
    name: Optional[str] = Field(None, description="Optional override skill name")
    activate: bool = Field(True, description="Whether to enable skill immediately")


class LocalSkillRequest(BaseModel):
    skill_name: str = Field(..., description="Local skill name")


class LocalSkillContentRequest(BaseModel):
    content: str = Field(..., description="Updated SKILL.md content")


class AgentResponse(BaseModel):
@@ -99,54 +54,12 @@ class AgentResponse(BaseModel):
    scope_note: Optional[str] = None


class AgentFileResponse(BaseModel):
    """Agent file content response."""
    filename: str
    content: str
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


class AgentProfileResponse(BaseModel):
    agent_id: str
    workspace_id: str
    profile: Dict[str, Any]
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


class AgentSkillsResponse(BaseModel):
    agent_id: str
    workspace_id: str
    skills: List[Dict[str, Any]]
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


class SkillDetailResponse(BaseModel):
    agent_id: str
    workspace_id: str
    skill: Dict[str, Any]
    scope_type: str = RUNTIME_SCOPE
    scope_note: Optional[str] = None


# Dependencies
def get_agent_factory():
    """Get AgentFactory instance."""
    return AgentFactory()


def get_workspace_manager():
    """Get run-scoped asset manager for one runtime workspace/run id."""
    return RunWorkspaceManager()


def get_skills_manager():
    """Get SkillsManager instance."""
    return SkillsManager()


# Routes
@router.post("", response_model=AgentResponse)
async def create_agent(
@@ -270,119 +183,6 @@ async def get_agent(
    )


@router.get("/{agent_id}/profile", response_model=AgentProfileResponse)
async def get_agent_profile(
    workspace_id: str,
    agent_id: str,
    skills_manager: SkillsManager = Depends(get_skills_manager),
):
    asset_dir = skills_manager.get_agent_asset_dir(workspace_id, agent_id)
    agent_config = load_agent_workspace_config(asset_dir / "agent.yaml")
    profiles = load_agent_profiles()
    profile = profiles.get(agent_id, {})
    bootstrap = get_bootstrap_config_for_run(skills_manager.project_root, workspace_id)
    override = bootstrap.agent_override(agent_id)
    active_tool_groups = override.get("active_tool_groups", agent_config.active_tool_groups or profile.get("active_tool_groups", []))
    if not isinstance(active_tool_groups, list):
        active_tool_groups = []
    disabled_tool_groups = agent_config.disabled_tool_groups
    if disabled_tool_groups:
        disabled_set = set(disabled_tool_groups)
        active_tool_groups = [group_name for group_name in active_tool_groups if group_name not in disabled_set]

    default_skills = profile.get("skills", [])
    if not isinstance(default_skills, list):
        default_skills = []
    resolved_skills = skills_manager.resolve_agent_skill_names(
        config_name=workspace_id,
        agent_id=agent_id,
        default_skills=default_skills,
    )
    prompt_files = agent_config.prompt_files or ["SOUL.md", "PROFILE.md", "AGENTS.md", "POLICY.md", "MEMORY.md"]
    model_name, model_provider = get_agent_model_info(agent_id)

    return AgentProfileResponse(
        agent_id=agent_id,
        workspace_id=workspace_id,
        profile={
            "model_name": model_name,
            "model_provider": model_provider,
            "prompt_files": prompt_files,
            "default_skills": default_skills,
            "resolved_skills": resolved_skills,
            "active_tool_groups": active_tool_groups,
            "disabled_tool_groups": disabled_tool_groups,
            "enabled_skills": agent_config.enabled_skills,
            "disabled_skills": agent_config.disabled_skills,
        },
        **_runtime_scope_fields(),
    )


@router.get("/{agent_id}/skills", response_model=AgentSkillsResponse)
async def get_agent_skills(
    workspace_id: str,
    agent_id: str,
    skills_manager: SkillsManager = Depends(get_skills_manager),
):
    agent_asset_dir = skills_manager.get_agent_asset_dir(workspace_id, agent_id)
    agent_config = load_agent_workspace_config(agent_asset_dir / "agent.yaml")
    resolved_skills = set(skills_manager.resolve_agent_skill_names(config_name=workspace_id, agent_id=agent_id, default_skills=[]))
    enabled = set(agent_config.enabled_skills)
    disabled = set(agent_config.disabled_skills)

    payload = []
    for item in skills_manager.list_agent_skill_catalog(workspace_id, agent_id):
        if item.skill_name in disabled:
            status = "disabled"
        elif item.skill_name in enabled:
            status = "enabled"
        elif item.skill_name in resolved_skills:
            status = "active"
        else:
            status = "available"
        payload.append({
            "skill_name": item.skill_name,
            "name": item.name,
            "description": item.description,
            "version": item.version,
            "source": item.source,
            "tools": item.tools,
            "status": status,
        })

    return AgentSkillsResponse(
        agent_id=agent_id,
        workspace_id=workspace_id,
        skills=payload,
        **_runtime_scope_fields(),
    )


@router.get("/{agent_id}/skills/{skill_name}", response_model=SkillDetailResponse)
async def get_agent_skill_detail(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    skills_manager: SkillsManager = Depends(get_skills_manager),
):
    try:
        detail = skills_manager.load_agent_skill_document(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=skill_name,
        )
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"Unknown skill: {skill_name}")

    return SkillDetailResponse(
        agent_id=agent_id,
        workspace_id=workspace_id,
        skill=detail,
        **_runtime_scope_fields(),
    )


@router.delete("/{agent_id}")
async def delete_agent(
    workspace_id: str,
@@ -448,16 +248,6 @@ async def update_agent(
    if metadata_updates:
        registry.update_metadata(agent_id, metadata_updates)

    # Update skills if provided
    if request.enabled_skills or request.disabled_skills:
        skills_manager = SkillsManager()
        skills_manager.update_agent_skill_overrides(
            config_name=workspace_id,
            agent_id=agent_id,
            enable=request.enabled_skills or [],
            disable=request.disabled_skills or [],
        )

    # Get updated info
    agent_info = registry.get(agent_id)
    return AgentResponse(
@@ -469,301 +259,3 @@ async def update_agent(
        status=agent_info.status,
        **_design_scope_fields(),
    )


@router.post("/{agent_id}/skills/{skill_name}/enable")
async def enable_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    registry = Depends(get_registry),
):
    """
    Enable a skill for an agent.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        skill_name: Skill name to enable

    Returns:
        Success message
    """
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    result = skills_manager.update_agent_skill_overrides(
        config_name=workspace_id,
        agent_id=agent_id,
        enable=[skill_name],
    )

    return {
        "message": f"Skill '{skill_name}' enabled for agent '{agent_id}'",
        "enabled_skills": result["enabled_skills"],
    }


@router.post("/{agent_id}/skills/{skill_name}/disable")
async def disable_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    registry = Depends(get_registry),
):
    """
    Disable a skill for an agent.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        skill_name: Skill name to disable

    Returns:
        Success message
    """
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    result = skills_manager.update_agent_skill_overrides(
        config_name=workspace_id,
        agent_id=agent_id,
        disable=[skill_name],
    )

    return {
        "message": f"Skill '{skill_name}' disabled for agent '{agent_id}'",
        "disabled_skills": result["disabled_skills"],
    }


@router.post("/{agent_id}/skills/install")
async def install_external_skill(
    workspace_id: str,
    agent_id: str,
    request: InstallExternalSkillRequest,
    registry=Depends(get_registry),
):
    """Install an external skill into one agent's local skills."""
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        result = skills_manager.install_external_skill_for_agent(
            config_name=workspace_id,
            agent_id=agent_id,
            source=request.source,
            skill_name=request.name,
            activate=request.activate,
        )
    except (FileNotFoundError, ValueError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {
        "message": f"Installed external skill '{result['skill_name']}' for '{agent_id}'",
        **result,
    }


@router.post("/{agent_id}/skills/local")
async def create_local_skill(
    workspace_id: str,
    agent_id: str,
    request: LocalSkillRequest,
    registry=Depends(get_registry),
):
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        skills_manager.create_agent_local_skill(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=request.skill_name,
        )
    except (ValueError, FileExistsError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {"message": f"Created local skill '{request.skill_name}' for '{agent_id}'"}


@router.put("/{agent_id}/skills/local/{skill_name}")
async def update_local_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    request: LocalSkillContentRequest,
    registry=Depends(get_registry),
):
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        skills_manager.update_agent_local_skill(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=skill_name,
            content=request.content,
        )
    except (ValueError, FileNotFoundError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {"message": f"Updated local skill '{skill_name}' for '{agent_id}'"}


@router.delete("/{agent_id}/skills/local/{skill_name}")
async def delete_local_skill(
    workspace_id: str,
    agent_id: str,
    skill_name: str,
    registry=Depends(get_registry),
):
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    skills_manager = SkillsManager()
    try:
        skills_manager.delete_agent_local_skill(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_name=skill_name,
        )
        skills_manager.forget_agent_skill_overrides(
            config_name=workspace_id,
            agent_id=agent_id,
            skill_names=[skill_name],
        )
    except (ValueError, FileNotFoundError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    return {"message": f"Deleted local skill '{skill_name}' for '{agent_id}'"}


@router.post("/{agent_id}/skills/upload")
async def upload_external_skill(
    workspace_id: str,
    agent_id: str,
    file: UploadFile = File(...),
    name: Optional[str] = Form(None),
    activate: bool = Form(True),
    registry=Depends(get_registry),
):
    """Upload a zip skill package from frontend and install for one agent."""
    agent_info = registry.get(agent_id)
    if not agent_info or agent_info.workspace_id != workspace_id:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_id}' not found")

    original_name = (file.filename or "").strip()
    if not original_name.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Uploaded file must be a .zip archive")

    suffix = Path(original_name).suffix or ".zip"
    temp_path: Optional[str] = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            temp_path = tmp.name
            content = await file.read()
            tmp.write(content)

        skills_manager = SkillsManager()
        result = skills_manager.install_external_skill_for_agent(
            config_name=workspace_id,
            agent_id=agent_id,
            source=temp_path,
            skill_name=name,
            activate=activate,
        )
    except (FileNotFoundError, ValueError) as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    finally:
        try:
            await file.close()
        except Exception as e:
            logger.warning(f"Failed to close uploaded file: {e}")
        if temp_path and os.path.exists(temp_path):
            os.remove(temp_path)

    return {
        "message": f"Uploaded and installed external skill '{result['skill_name']}' for '{agent_id}'",
        **result,
    }


@router.get("/{agent_id}/files/{filename}", response_model=AgentFileResponse)
async def get_agent_file(
    workspace_id: str,
    agent_id: str,
    filename: str,
    workspace_manager: RunWorkspaceManager = Depends(get_workspace_manager),
):
    """
    Read an agent file from the run-scoped asset tree under `runs/<run_id>/`.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        filename: File to read (e.g., SOUL.md, PROFILE.md)

    Returns:
        File content
    """
    try:
        content = workspace_manager.load_agent_file(
            config_name=workspace_id,
            agent_id=agent_id,
            filename=filename,
        )
        return AgentFileResponse(
            filename=filename,
            content=content,
            **_runtime_scope_fields(),
        )
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"File '{filename}' not found")


@router.put("/{agent_id}/files/{filename}", response_model=AgentFileResponse)
async def update_agent_file(
    workspace_id: str,
    agent_id: str,
    filename: str,
    content: str = Body(..., media_type="text/plain"),
    workspace_manager: RunWorkspaceManager = Depends(get_workspace_manager),
):
    """
    Update an agent file in the run-scoped asset tree under `runs/<run_id>/`.

    Args:
        workspace_id: Workspace identifier
        agent_id: Agent identifier
        filename: File to update
        content: New file content

    Returns:
        Updated file information
    """
    try:
        workspace_manager.update_agent_file(
            config_name=workspace_id,
            agent_id=agent_id,
            filename=filename,
            content=content,
        )
        return AgentFileResponse(
            filename=filename,
            content=content,
            **_runtime_scope_fields(),
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@@ -6,7 +6,7 @@ Provides REST API endpoints for runtime agent asset access under `runs/<run_id>/

This module separates runtime concerns from design-time workspace management:
- `/api/runs/{run_id}/agents/*` - Runtime agent assets and configuration
- `/api/workspaces/{workspace_id}/agents/*` - Design-time workspace registry (deprecated)
- design-time workspace registry CRUD lives under `/api/workspaces/{workspace_id}/...`
"""
import logging
import os

@@ -29,6 +29,17 @@ router = APIRouter(prefix="/api/runtime", tags=["runtime"])
PROJECT_ROOT = Path(__file__).resolve().parents[2]


def _normalize_schedule_mode(value: Any) -> str:
    """Normalize schedule mode to the current public vocabulary.

    `intraday` is kept as a backward-compatible alias for `interval`.
    """
    mode = str(value or "daily").strip().lower()
    if mode == "intraday":
        return "interval"
    return mode or "daily"

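Since `_normalize_schedule_mode` is the pivot for the vocabulary change in the rest of this file, here are its expected outputs, derived directly from the code above:

```python
# Behavior of _normalize_schedule_mode, derived from the hunk above.
assert _normalize_schedule_mode("daily") == "daily"
assert _normalize_schedule_mode("intraday") == "interval"   # backward-compatible alias
assert _normalize_schedule_mode(" INTERVAL ") == "interval"  # case and whitespace folded
assert _normalize_schedule_mode(None) == "daily"            # missing value falls back
assert _normalize_schedule_mode("   ") == "daily"           # blank string also falls back
```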

class RuntimeState:
    """Thread-safe singleton for managing runtime state.

@@ -439,6 +450,11 @@ def _is_gateway_running() -> bool:

    Checks both the internally-managed gateway process and falls back to
    port availability (for externally-managed gateway processes).

    The fallback matters because this codebase may still encounter two startup
    paths while historical artifacts remain in-tree:
    1. runtime_service-managed Gateway subprocesses
    2. externally started historical Gateway processes outside the supported dev flow
    """
    process = _runtime_state.gateway_process
    if process is not None and process.poll() is None:
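The hunk cuts off before the port-availability fallback the docstring describes. A minimal sketch of that kind of probe is below; the socket check is a standard pattern, but the helper name and its wiring into `_is_gateway_running` are assumptions, not code from this commit.

```python
# Hypothetical sketch of a port-availability probe like the fallback described above.
import socket

def _port_in_use(port: int, host: str = "127.0.0.1") -> bool:
    """Return True if something is already listening on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(0.5)
        return sock.connect_ex((host, port)) == 0  # 0 means the TCP connect succeeded
```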
@@ -481,7 +497,11 @@ def _start_gateway_process(
    bootstrap: Dict[str, Any],
    port: int
) -> subprocess.Popen:
    """Start Gateway as a separate process."""
    """Start Gateway as a runtime_service-managed subprocess.

    This path is used when the runtime lifecycle is driven through the runtime API.
    It is not the only supported way a Gateway may exist in the current repo.
    """
    # Validate configuration before starting
    validation_errors = _validate_gateway_config(bootstrap)
    if validation_errors:
@@ -592,9 +612,9 @@ def _validate_gateway_config(bootstrap: Dict[str, Any]) -> List[str]:
        errors.append("Dates must be in YYYY-MM-DD format")

    # Validate schedule mode
    schedule_mode = bootstrap.get("schedule_mode", "daily")
    if schedule_mode not in ("daily", "intraday"):
        errors.append(f"Invalid schedule_mode '{schedule_mode}': must be 'daily' or 'intraday'")
    schedule_mode = _normalize_schedule_mode(bootstrap.get("schedule_mode", "daily"))
    if schedule_mode not in ("daily", "interval"):
        errors.append(f"Invalid schedule_mode '{schedule_mode}': must be 'daily' or 'interval'")

    return errors

@@ -778,7 +798,7 @@ async def get_runtime_mode() -> RuntimeModeResponse:
            mode=mode,
            is_backtest=mode == "backtest",
            run_id=context.get("config_name"),
            schedule_mode=bootstrap.get("schedule_mode"),
            schedule_mode=_normalize_schedule_mode(bootstrap.get("schedule_mode")),
            is_running=True,
        )
    except HTTPException:
@@ -909,7 +929,7 @@ def _resolve_runtime_response(run_id: str) -> RuntimeConfigResponse:
        project_root=PROJECT_ROOT,
        config_name=run_id,
        enable_memory=bool(bootstrap.get("enable_memory", False)),
        schedule_mode=str(bootstrap.get("schedule_mode", "daily")),
        schedule_mode=_normalize_schedule_mode(bootstrap.get("schedule_mode", "daily")),
        interval_minutes=int(bootstrap.get("interval_minutes", 60) or 60),
        trigger_time=str(bootstrap.get("trigger_time", "09:30") or "09:30"),
    )
@@ -929,11 +949,11 @@ def _normalize_runtime_config_updates(
    updates: Dict[str, Any] = {}

    if request.schedule_mode is not None:
        schedule_mode = str(request.schedule_mode).strip().lower()
        if schedule_mode not in {"daily", "intraday"}:
        schedule_mode = _normalize_schedule_mode(request.schedule_mode)
        if schedule_mode not in {"daily", "interval"}:
            raise HTTPException(
                status_code=400,
                detail="schedule_mode must be 'daily' or 'intraday'",
                detail="schedule_mode must be 'daily' or 'interval'",
            )
        updates["schedule_mode"] = schedule_mode

@@ -31,8 +31,7 @@ def _build_scope_payload(project_root: Path) -> dict[str, object]:
        },
        "agent_route_note": (
            "Runtime routes use `/api/runs/{run_id}/agents/...`. "
            "Legacy `/api/workspaces/{workspace_id}/agents/...` routes are deprecated "
            "but remain for backward compatibility."
            "Design-time CRUD routes use `/api/workspaces/{workspace_id}/agents/...`."
        ),
    }

@@ -4,6 +4,12 @@ Pipeline Runner - Independent trading pipeline execution

This module provides functions to start/stop trading pipelines
that can be called from the REST API.

COMPATIBILITY_NOTE:
This module still carries selected fallback creation paths used by managed
runtime startup and compatibility flows. New runtime behavior should be judged
against the run-scoped helpers and current pipeline selection rules rather than
assuming every constructor here is the long-term default.
"""

from __future__ import annotations
@@ -11,6 +17,7 @@ from __future__ import annotations
import asyncio
import os
from contextlib import AsyncExitStack
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Callable

@@ -22,7 +29,7 @@ from backend.agents.prompt_loader import get_prompt_loader
from backend.agents.workspace_manager import WorkspaceManager
from backend.config.constants import ANALYST_TYPES
from backend.core.pipeline import TradingPipeline
from backend.core.scheduler import BacktestScheduler, Scheduler
from backend.core.scheduler import BacktestScheduler, Scheduler, normalize_schedule_mode
from backend.llm.models import get_agent_formatter, get_agent_model
from backend.runtime.manager import (
    TradingRuntimeManager,
@@ -46,6 +53,21 @@ _gateway_instance: Optional[Gateway] = None
_long_term_memories: List[Any] = []


@dataclass
class GatewayRuntimeBundle:
    """Assembled runtime components for a Gateway-backed execution path."""

    runtime_manager: TradingRuntimeManager
    market_service: MarketService
    storage_service: StorageService
    pipeline: TradingPipeline
    gateway: Gateway
    scheduler: Optional[Scheduler]
    scheduler_callback: Optional[Callable]
    long_term_memories: List[Any]
    trading_dates: List[str]


def _set_gateway(gateway: Optional[Gateway]) -> None:
    """Set global gateway reference."""
    global _gateway_instance
@@ -443,6 +465,151 @@ def create_agents(
    return analysts, risk_manager, portfolio_manager, long_term_memories


def build_gateway_runtime_bundle(
    *,
    run_id: str,
    run_dir: Path,
    bootstrap: Dict[str, Any],
    poll_interval: int = 10,
) -> GatewayRuntimeBundle:
    """Build the full Gateway runtime component graph for one run."""
    tickers = bootstrap.get("tickers", ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "META", "TSLA", "AMD", "NFLX", "AVGO", "PLTR", "COIN"])
    initial_cash = float(bootstrap.get("initial_cash", 100000.0))
    margin_requirement = float(bootstrap.get("margin_requirement", 0.0))
    max_comm_cycles = int(bootstrap.get("max_comm_cycles", 2))
    schedule_mode = normalize_schedule_mode(bootstrap.get("schedule_mode", "daily"))
    trigger_time = bootstrap.get("trigger_time", "09:30")
    interval_minutes = int(bootstrap.get("interval_minutes", 60))
    heartbeat_interval = int(bootstrap.get("heartbeat_interval", 0))
    mode = bootstrap.get("mode", "live")
    start_date = bootstrap.get("start_date")
    end_date = bootstrap.get("end_date")
    enable_memory = bootstrap.get("enable_memory", False)

    is_backtest = mode == "backtest"

    runtime_manager = TradingRuntimeManager(
        config_name=run_id,
        run_dir=run_dir,
        bootstrap=bootstrap,
    )
    runtime_manager.prepare_run()

    market_service = MarketService(
        tickers=tickers,
        poll_interval=poll_interval,
        backtest_mode=is_backtest,
        api_key=os.getenv("FINNHUB_API_KEY") if not is_backtest else None,
        backtest_start_date=start_date if is_backtest else None,
        backtest_end_date=end_date if is_backtest else None,
    )

    storage_service = StorageService(
        dashboard_dir=run_dir / "team_dashboard",
        initial_cash=initial_cash,
        config_name=run_id,
    )
    if not storage_service.files["summary"].exists():
        storage_service.initialize_empty_dashboard()
    else:
        storage_service.update_leaderboard_model_info()

    analysts, risk_manager, pm, long_term_memories = create_agents(
        run_id=run_id,
        run_dir=run_dir,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        enable_long_term_memory=enable_memory,
    )
    for agent in analysts + [risk_manager, pm]:
        agent_id = getattr(agent, "agent_id", None) or getattr(agent, "name", None)
        if agent_id:
            runtime_manager.register_agent(agent_id)

    portfolio_state = storage_service.load_portfolio_state()
    pm.load_portfolio_state(portfolio_state)

    settlement_coordinator = SettlementCoordinator(
        storage=storage_service,
        initial_capital=initial_cash,
    )
    pipeline = TradingPipeline(
        analysts=analysts,
        risk_manager=risk_manager,
        portfolio_manager=pm,
        settlement_coordinator=settlement_coordinator,
        max_comm_cycles=max_comm_cycles,
        runtime_manager=runtime_manager,
    )

    scheduler_callback = None
    live_scheduler = None
    trading_dates: List[str] = []

    if is_backtest:
        backtest_scheduler = BacktestScheduler(
            start_date=start_date,
            end_date=end_date,
            trading_calendar="NYSE",
            delay_between_days=0.5,
        )
        trading_dates = backtest_scheduler.get_trading_dates()

        async def scheduler_callback_fn(callback):
            await backtest_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn
    else:
        live_scheduler = Scheduler(
            mode=schedule_mode,
            trigger_time=trigger_time,
            interval_minutes=interval_minutes,
            heartbeat_interval=heartbeat_interval if heartbeat_interval > 0 else None,
            config={"config_name": run_id},
        )

        async def scheduler_callback_fn(callback):
            await live_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn

    gateway = Gateway(
        market_service=market_service,
        storage_service=storage_service,
        pipeline=pipeline,
        scheduler_callback=scheduler_callback,
        config={
            "mode": mode,
            "backtest_mode": is_backtest,
            "tickers": tickers,
            "config_name": run_id,
            "schedule_mode": schedule_mode,
            "interval_minutes": interval_minutes,
            "trigger_time": trigger_time,
            "heartbeat_interval": heartbeat_interval,
            "initial_cash": initial_cash,
            "margin_requirement": margin_requirement,
            "max_comm_cycles": max_comm_cycles,
            "enable_memory": enable_memory,
        },
        scheduler=live_scheduler,
    )
    if is_backtest:
        gateway.set_backtest_dates(trading_dates)

    return GatewayRuntimeBundle(
        runtime_manager=runtime_manager,
        market_service=market_service,
        storage_service=storage_service,
        pipeline=pipeline,
        gateway=gateway,
        scheduler=live_scheduler,
        scheduler_callback=scheduler_callback,
        long_term_memories=long_term_memories,
        trading_dates=trading_dates,
    )

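For orientation, this is roughly how the new builder is consumed by `gateway_server.py` later in this commit; the `bootstrap` values here are illustrative, and the real entrypoint also enters each long-term memory context before starting the gateway.

```python
# Condensed usage sketch of build_gateway_runtime_bundle (bootstrap values illustrative).
import asyncio
from pathlib import Path

async def main() -> None:
    bundle = build_gateway_runtime_bundle(
        run_id="smoke_fullstack",
        run_dir=Path("runs/smoke_fullstack"),
        bootstrap={
            "mode": "backtest",
            "tickers": ["AAPL", "MSFT"],
            "start_date": "2025-11-01",
            "end_date": "2025-12-01",
        },
    )
    await bundle.gateway.start(host="0.0.0.0", port=8765)  # blocks until shutdown

asyncio.run(main())
```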
async def run_pipeline(
    run_id: str,
    run_dir: Path,
@@ -483,7 +650,7 @@ async def run_pipeline(
    initial_cash = float(bootstrap.get("initial_cash", 100000.0))
    margin_requirement = float(bootstrap.get("margin_requirement", 0.0))
    max_comm_cycles = int(bootstrap.get("max_comm_cycles", 2))
    schedule_mode = bootstrap.get("schedule_mode", "daily")
    schedule_mode = normalize_schedule_mode(bootstrap.get("schedule_mode", "daily"))
    trigger_time = bootstrap.get("trigger_time", "09:30")
    interval_minutes = int(bootstrap.get("interval_minutes", 60))
    heartbeat_interval = int(bootstrap.get("heartbeat_interval", 0))

@@ -1,15 +1,14 @@
# -*- coding: utf-8 -*-
"""Gateway Server - Entry point for Gateway subprocess.
"""Gateway Server - Entry point for the managed Gateway subprocess.

This module is launched as a subprocess by the Control Plane (FastAPI)
to run the Data Plane (Gateway + Pipeline).
This module is launched by `runtime_service` when the runtime API is used to
spawn a run-scoped Gateway process.
"""

import argparse
import asyncio
import json
import logging
import os
import sys
from contextlib import AsyncExitStack
from pathlib import Path
@@ -19,22 +18,13 @@ from dotenv import load_dotenv
# Load environment variables
load_dotenv()

from backend.agents.prompt_loader import get_prompt_loader
from backend.core.pipeline import TradingPipeline
from backend.core.pipeline_runner import create_agents
from backend.core.scheduler import BacktestScheduler, Scheduler
from backend.core.pipeline_runner import build_gateway_runtime_bundle
from backend.runtime.manager import (
    TradingRuntimeManager,
    set_global_runtime_manager,
    clear_global_runtime_manager,
)
from backend.services.gateway import Gateway
from backend.services.market import MarketService
from backend.services.storage import StorageService
from backend.utils.settlement import SettlementCoordinator

logger = logging.getLogger(__name__)
_prompt_loader = get_prompt_loader()


INFO_LOGGER_PREFIXES = (
@@ -110,153 +100,24 @@ async def run_gateway(
    port: int
):
    """Run Gateway with Pipeline."""

    # Extract config
    tickers = bootstrap.get("tickers", ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "META", "TSLA", "AMD", "NFLX", "AVGO", "PLTR", "COIN"])
    initial_cash = float(bootstrap.get("initial_cash", 100000.0))
    margin_requirement = float(bootstrap.get("margin_requirement", 0.0))
    max_comm_cycles = int(bootstrap.get("max_comm_cycles", 2))
    schedule_mode = bootstrap.get("schedule_mode", "daily")
    trigger_time = bootstrap.get("trigger_time", "09:30")
    interval_minutes = int(bootstrap.get("interval_minutes", 60))
    heartbeat_interval = int(bootstrap.get("heartbeat_interval", 0))  # 0 = disabled
    mode = bootstrap.get("mode", "live")
    start_date = bootstrap.get("start_date")
    end_date = bootstrap.get("end_date")
    enable_memory = bootstrap.get("enable_memory", False)
    poll_interval = int(bootstrap.get("poll_interval", 10))

    is_backtest = mode == "backtest"

    logger.info(f"[Gateway Server] Starting run {run_id} on port {port}")

    # Create runtime manager
    runtime_manager = TradingRuntimeManager(
        config_name=run_id,
        run_dir=run_dir,
        bootstrap=bootstrap,
    )
    runtime_manager.prepare_run()
    set_global_runtime_manager(runtime_manager)

    try:
        async with AsyncExitStack() as stack:
            # Create services
            market_service = MarketService(
                tickers=tickers,
                poll_interval=poll_interval,
                backtest_mode=is_backtest,
                api_key=os.getenv("FINNHUB_API_KEY") if not is_backtest else None,
                backtest_start_date=start_date if is_backtest else None,
                backtest_end_date=end_date if is_backtest else None,
            )

            storage_service = StorageService(
                dashboard_dir=run_dir / "team_dashboard",
                initial_cash=initial_cash,
                config_name=run_id,
            )

            if not storage_service.files["summary"].exists():
                storage_service.initialize_empty_dashboard()
            else:
                storage_service.update_leaderboard_model_info()

            # Create agents
            analysts, risk_manager, pm, long_term_memories = create_agents(
    bundle = build_gateway_runtime_bundle(
        run_id=run_id,
        run_dir=run_dir,
                initial_cash=initial_cash,
                margin_requirement=margin_requirement,
                enable_long_term_memory=enable_memory,
        bootstrap=bootstrap,
        poll_interval=poll_interval,
    )
    set_global_runtime_manager(bundle.runtime_manager)

            # Register agents
            for agent in analysts + [risk_manager, pm]:
                agent_id = getattr(agent, "agent_id", None) or getattr(agent, "name", None)
                if agent_id:
                    runtime_manager.register_agent(agent_id)

            # Load portfolio state
            portfolio_state = storage_service.load_portfolio_state()
            pm.load_portfolio_state(portfolio_state)

            # Create settlement coordinator
            settlement_coordinator = SettlementCoordinator(
                storage=storage_service,
                initial_capital=initial_cash,
            )

            # Create pipeline
            pipeline = TradingPipeline(
                analysts=analysts,
                risk_manager=risk_manager,
                portfolio_manager=pm,
                settlement_coordinator=settlement_coordinator,
                max_comm_cycles=max_comm_cycles,
                runtime_manager=runtime_manager,
            )

            # Create scheduler
            scheduler_callback = None
            live_scheduler = None

            if is_backtest:
                backtest_scheduler = BacktestScheduler(
                    start_date=start_date,
                    end_date=end_date,
                    trading_calendar="NYSE",
                    delay_between_days=0.5,
                )

                async def scheduler_callback_fn(callback):
                    await backtest_scheduler.start(callback)

                scheduler_callback = scheduler_callback_fn
            else:
                live_scheduler = Scheduler(
                    mode=schedule_mode,
                    trigger_time=trigger_time,
                    interval_minutes=interval_minutes,
                    heartbeat_interval=heartbeat_interval if heartbeat_interval > 0 else None,
                    config={"config_name": run_id},
                )

                async def scheduler_callback_fn(callback):
                    await live_scheduler.start(callback)

                scheduler_callback = scheduler_callback_fn

            # Enter long-term memory contexts
            for memory in long_term_memories:
        async with AsyncExitStack() as stack:
            for memory in bundle.long_term_memories:
                await stack.enter_async_context(memory)

            # Create Gateway
            gateway = Gateway(
                market_service=market_service,
                storage_service=storage_service,
                pipeline=pipeline,
                scheduler_callback=scheduler_callback,
                config={
                    "mode": mode,
                    "backtest_mode": is_backtest,
                    "tickers": tickers,
                    "config_name": run_id,
                    "schedule_mode": schedule_mode,
                    "interval_minutes": interval_minutes,
                    "trigger_time": trigger_time,
                    "heartbeat_interval": heartbeat_interval,
                    "initial_cash": initial_cash,
                    "margin_requirement": margin_requirement,
                    "max_comm_cycles": max_comm_cycles,
                    "enable_memory": enable_memory,
                },
                scheduler=live_scheduler,
            )

            # Start Gateway (blocks until shutdown)
            logger.info(f"[Gateway Server] Gateway starting on port {port}")
            await gateway.start(host="0.0.0.0", port=port)
            await bundle.gateway.start(host="0.0.0.0", port=port)

    except asyncio.CancelledError:
        logger.info("[Gateway Server] Cancelled")

backend/main.py (596 lines deleted)
@@ -1,596 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Main Entry Point
|
||||
Supports: backtest, live modes
|
||||
"""
|
||||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from contextlib import AsyncExitStack
|
||||
from pathlib import Path
|
||||
import loguru
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from backend.agents import EvoAgent
|
||||
from backend.agents.agent_workspace import load_agent_workspace_config
|
||||
from backend.agents.skills_manager import SkillsManager
|
||||
from backend.agents.toolkit_factory import create_agent_toolkit, load_agent_profiles
|
||||
from backend.agents.prompt_loader import get_prompt_loader
|
||||
# WorkspaceManager is RunWorkspaceManager - provides run-scoped asset management
|
||||
# All runtime state lives under runs/<run_id>/
|
||||
from backend.agents.workspace_manager import WorkspaceManager
|
||||
from backend.config.bootstrap_config import resolve_runtime_config
|
||||
from backend.config.constants import ANALYST_TYPES
|
||||
from backend.core.pipeline import TradingPipeline
|
||||
from backend.core.scheduler import BacktestScheduler, Scheduler
|
||||
from backend.llm.models import get_agent_formatter, get_agent_model
|
||||
from backend.api.runtime import unregister_runtime_manager
|
||||
from backend.runtime.manager import (
|
||||
TradingRuntimeManager,
|
||||
set_global_runtime_manager,
|
||||
clear_global_runtime_manager,
|
||||
)
|
||||
from backend.gateway_server import configure_gateway_logging
|
||||
from backend.services.gateway import Gateway
|
||||
from backend.services.market import MarketService
|
||||
from backend.services.storage import StorageService
|
||||
from backend.utils.settlement import SettlementCoordinator
|
||||
|
||||
load_dotenv()
|
||||
logger = logging.getLogger(__name__)
|
||||
loguru.logger.disable("flowllm")
|
||||
loguru.logger.disable("reme_ai")
|
||||
configure_gateway_logging(verbose=os.getenv("LOG_LEVEL", "").upper() == "DEBUG")
|
||||
_prompt_loader = get_prompt_loader()
|
||||
|
||||
|
||||
def _get_run_dir(config_name: str) -> Path:
|
||||
"""Return the canonical run-scoped directory for a config.
|
||||
|
||||
This is the authoritative path for runtime state under runs/<run_id>/.
|
||||
All runtime assets, state, and exports are scoped to this directory.
|
||||
"""
|
||||
project_root = Path(__file__).resolve().parents[1]
|
||||
# Use RunWorkspaceManager for run-scoped path resolution
|
||||
return WorkspaceManager(project_root=project_root).get_run_dir(config_name)
|
||||
|
||||
|
||||
def _resolve_runtime_config(args) -> dict:
|
||||
"""Merge env defaults with run-scoped bootstrap config."""
|
||||
project_root = Path(__file__).resolve().parents[1]
|
||||
return resolve_runtime_config(
|
||||
project_root=project_root,
|
||||
config_name=args.config_name,
|
||||
enable_memory=args.enable_memory,
|
||||
schedule_mode=args.schedule_mode,
|
||||
interval_minutes=args.interval_minutes,
|
||||
trigger_time=args.trigger_time,
|
||||
)
|
||||
|
||||
|
||||
def create_long_term_memory(agent_name: str, config_name: str):
|
||||
"""
|
||||
Create ReMeTaskLongTermMemory for an agent
|
||||
|
||||
Requires DASHSCOPE_API_KEY env var
|
||||
"""
|
||||
from agentscope.memory import ReMeTaskLongTermMemory
|
||||
from agentscope.model import DashScopeChatModel
|
||||
from agentscope.embedding import DashScopeTextEmbedding
|
||||
|
||||
api_key = os.getenv("MEMORY_API_KEY")
|
||||
if not api_key:
|
||||
logger.warning("MEMORY_API_KEY not set, long-term memory disabled")
|
||||
return None
|
||||
|
||||
memory_dir = str(_get_run_dir(config_name) / "memory")
|
||||
|
||||
return ReMeTaskLongTermMemory(
|
||||
agent_name=agent_name,
|
||||
user_name=agent_name,
|
||||
model=DashScopeChatModel(
|
||||
model_name=os.getenv("MEMORY_MODEL_NAME", "qwen3-max"),
|
||||
api_key=api_key,
|
||||
stream=False,
|
||||
),
|
||||
embedding_model=DashScopeTextEmbedding(
|
||||
model_name=os.getenv(
|
||||
"MEMORY_EMBEDDING_MODEL",
|
||||
"text-embedding-v4",
|
||||
),
|
||||
api_key=api_key,
|
||||
dimensions=1024,
|
||||
),
|
||||
**{
|
||||
"vector_store.default.backend": "local",
|
||||
"vector_store.default.params.store_dir": memory_dir,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def _resolve_evo_agent_ids() -> set[str]:
|
||||
"""Return agent ids selected to use EvoAgent.
|
||||
|
||||
By default, all supported roles use EvoAgent.
|
||||
EVO_AGENT_IDS can be used to limit to specific roles (legacy behavior).
|
||||
Set EVO_AGENT_LEGACY=1 to disable EvoAgent entirely.
|
||||
|
||||
Supported roles:
|
||||
- analyst roles (fundamentals, technical, sentiment, valuation)
|
||||
- risk_manager
|
||||
- portfolio_manager
|
||||
|
||||
Example:
|
||||
EVO_AGENT_IDS=fundamentals_analyst,risk_manager,portfolio_manager
|
||||
"""
|
||||
from backend.config.constants import ANALYST_TYPES
|
||||
|
||||
all_supported = set(ANALYST_TYPES) | {"risk_manager", "portfolio_manager"}
|
||||
|
||||
raw = os.getenv("EVO_AGENT_IDS", "")
|
||||
if not raw.strip():
|
||||
# Default: all supported roles use EvoAgent
|
||||
return all_supported
|
||||
|
||||
if raw.strip().lower() in ("legacy", "old", "none"):
|
||||
return set()
|
||||
|
||||
requested = {
|
||||
item.strip()
|
||||
for item in raw.split(",")
|
||||
if item.strip()
|
||||
}
|
||||
return {
|
||||
agent_id
|
||||
for agent_id in requested
|
||||
if agent_id in ANALYST_TYPES or agent_id in {"risk_manager", "portfolio_manager"}
|
||||
}
|
||||
|
||||
|
||||
def _create_analyst_agent(
    *,
    analyst_type: str,
    config_name: str,
    model,
    formatter,
    skills_manager: SkillsManager,
    active_skill_map: dict[str, list[Path]],
    long_term_memory=None,
):
    """Create one analyst agent backed by EvoAgent."""
    active_skill_dirs = active_skill_map.get(analyst_type, [])
    toolkit = create_agent_toolkit(
        analyst_type,
        config_name,
        active_skill_dirs=active_skill_dirs,
    )

    workspace_dir = skills_manager.get_agent_asset_dir(config_name, analyst_type)
    agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
    agent = EvoAgent(
        agent_id=analyst_type,
        config_name=config_name,
        workspace_dir=workspace_dir,
        model=model,
        formatter=formatter,
        skills_manager=skills_manager,
        prompt_files=agent_config.prompt_files,
        long_term_memory=long_term_memory,
    )
    agent.toolkit = toolkit
    setattr(agent, "run_id", config_name)
    # Keep workspace_id for backward compatibility
    setattr(agent, "workspace_id", config_name)
    return agent


def _create_risk_manager_agent(
    *,
    config_name: str,
    model,
    formatter,
    skills_manager: SkillsManager,
    active_skill_map: dict[str, list[Path]],
    long_term_memory=None,
):
    """Create the risk manager, optionally using EvoAgent."""
    active_skill_dirs = active_skill_map.get("risk_manager", [])
    toolkit = create_agent_toolkit(
        "risk_manager",
        config_name,
        active_skill_dirs=active_skill_dirs,
    )

    use_evo_agent = "risk_manager" in _resolve_evo_agent_ids()

    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(config_name, "risk_manager")
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id="risk_manager",
            config_name=config_name,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            long_term_memory=long_term_memory,
        )
        agent.toolkit = toolkit
        setattr(agent, "run_id", config_name)
        # Keep workspace_id for backward compatibility
        setattr(agent, "workspace_id", config_name)
        return agent

    return RiskAgent(
        model=model,
        formatter=formatter,
        name="risk_manager",
        config={"config_name": config_name},
        long_term_memory=long_term_memory,
        toolkit=toolkit,
    )


def _create_portfolio_manager_agent(
    *,
    config_name: str,
    model,
    formatter,
    initial_cash: float,
    margin_requirement: float,
    skills_manager: SkillsManager,
    active_skill_map: dict[str, list[Path]],
    long_term_memory=None,
):
    """Create the portfolio manager, optionally using EvoAgent."""
    active_skill_dirs = active_skill_map.get("portfolio_manager", [])
    use_evo_agent = "portfolio_manager" in _resolve_evo_agent_ids()

    if use_evo_agent:
        workspace_dir = skills_manager.get_agent_asset_dir(
            config_name,
            "portfolio_manager",
        )
        agent_config = load_agent_workspace_config(workspace_dir / "agent.yaml")
        agent = EvoAgent(
            agent_id="portfolio_manager",
            config_name=config_name,
            workspace_dir=workspace_dir,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            prompt_files=agent_config.prompt_files,
            initial_cash=initial_cash,
            margin_requirement=margin_requirement,
            long_term_memory=long_term_memory,
        )
        agent.toolkit = create_agent_toolkit(
            "portfolio_manager",
            config_name,
            owner=agent,
            active_skill_dirs=active_skill_dirs,
        )
        setattr(agent, "run_id", config_name)
        # Keep workspace_id for backward compatibility
        setattr(agent, "workspace_id", config_name)
        return agent

    return PMAgent(
        name="portfolio_manager",
        model=model,
        formatter=formatter,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        config={"config_name": config_name},
        long_term_memory=long_term_memory,
        toolkit_factory=create_agent_toolkit,
        toolkit_factory_kwargs={
            "active_skill_dirs": active_skill_dirs,
        },
    )


def create_agents(
    config_name: str,
    initial_cash: float,
    margin_requirement: float,
    enable_long_term_memory: bool = False,
):
    """Create all agents for the system.

    Returns:
        tuple: (analysts, risk_manager, portfolio_manager, long_term_memories)
        long_term_memories is a list of the created long-term memory objects
    """
    analysts = []
    long_term_memories = []
    workspace_manager = WorkspaceManager()
    workspace_manager.initialize_default_assets(
        config_name=config_name,
        agent_ids=list(ANALYST_TYPES.keys())
        + ["risk_manager", "portfolio_manager"],
        analyst_personas=_prompt_loader.load_yaml_config("analyst", "personas"),
    )
    profiles = load_agent_profiles()
    skills_manager = SkillsManager()
    active_skill_map = skills_manager.prepare_active_skills(
        config_name=config_name,
        agent_defaults={
            agent_id: profile.get("skills", [])
            for agent_id, profile in profiles.items()
        },
    )

    for analyst_type in ANALYST_TYPES:
        model = get_agent_model(analyst_type)
        formatter = get_agent_formatter(analyst_type)

        long_term_memory = None
        if enable_long_term_memory:
            long_term_memory = create_long_term_memory(
                analyst_type,
                config_name,
            )
            if long_term_memory:
                long_term_memories.append(long_term_memory)

        analyst = _create_analyst_agent(
            analyst_type=analyst_type,
            config_name=config_name,
            model=model,
            formatter=formatter,
            skills_manager=skills_manager,
            active_skill_map=active_skill_map,
            long_term_memory=long_term_memory,
        )
        analysts.append(analyst)

    risk_long_term_memory = None
    if enable_long_term_memory:
        risk_long_term_memory = create_long_term_memory(
            "risk_manager",
            config_name,
        )
        if risk_long_term_memory:
            long_term_memories.append(risk_long_term_memory)

    risk_manager = _create_risk_manager_agent(
        config_name=config_name,
        model=get_agent_model("risk_manager"),
        formatter=get_agent_formatter("risk_manager"),
        skills_manager=skills_manager,
        active_skill_map=active_skill_map,
        long_term_memory=risk_long_term_memory,
    )

    pm_long_term_memory = None
    if enable_long_term_memory:
        pm_long_term_memory = create_long_term_memory(
            "portfolio_manager",
            config_name,
        )
        if pm_long_term_memory:
            long_term_memories.append(pm_long_term_memory)

    portfolio_manager = _create_portfolio_manager_agent(
        config_name=config_name,
        model=get_agent_model("portfolio_manager"),
        formatter=get_agent_formatter("portfolio_manager"),
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        skills_manager=skills_manager,
        active_skill_map=active_skill_map,
        long_term_memory=pm_long_term_memory,
    )

    return analysts, risk_manager, portfolio_manager, long_term_memories


async def run_with_gateway(args):
    """Run with WebSocket gateway"""
    is_backtest = args.mode == "backtest"
    runtime_config = _resolve_runtime_config(args)

    config_name = args.config_name
    tickers = runtime_config["tickers"]
    initial_cash = runtime_config["initial_cash"]
    margin_requirement = runtime_config["margin_requirement"]

    runtime_manager = TradingRuntimeManager(
        config_name=config_name,
        run_dir=_get_run_dir(config_name),
        bootstrap=runtime_config,
    )
    runtime_manager.prepare_run()
    set_global_runtime_manager(runtime_manager)

    # Create market service
    market_service = MarketService(
        tickers=tickers,
        poll_interval=args.poll_interval,
        backtest_mode=is_backtest,
        api_key=os.getenv("FINNHUB_API_KEY") if not is_backtest else None,
        backtest_start_date=args.start_date if is_backtest else None,
        backtest_end_date=args.end_date if is_backtest else None,
    )

    # Create storage service
    storage_service = StorageService(
        dashboard_dir=_get_run_dir(config_name) / "team_dashboard",
        initial_cash=initial_cash,
        config_name=config_name,
    )

    if not storage_service.files["summary"].exists():
        storage_service.initialize_empty_dashboard()
    else:
        storage_service.update_leaderboard_model_info()

    # Create agents and pipeline
    analysts, risk_manager, pm, long_term_memories = create_agents(
        config_name=config_name,
        initial_cash=initial_cash,
        margin_requirement=margin_requirement,
        enable_long_term_memory=runtime_config["enable_memory"],
    )
    for agent in analysts + [risk_manager, pm]:
        agent_id = getattr(agent, "agent_id", None) or getattr(agent, "name", None)
        if agent_id:
            runtime_manager.register_agent(agent_id)
    portfolio_state = storage_service.load_portfolio_state()
    pm.load_portfolio_state(portfolio_state)

    settlement_coordinator = SettlementCoordinator(
        storage=storage_service,
        initial_capital=initial_cash,
    )

    pipeline = TradingPipeline(
        analysts=analysts,
        risk_manager=risk_manager,
        portfolio_manager=pm,
        settlement_coordinator=settlement_coordinator,
        max_comm_cycles=runtime_config["max_comm_cycles"],
        runtime_manager=runtime_manager,
    )

    # Create scheduler callback
    scheduler_callback = None
    trading_dates = []
    live_scheduler = None

    if is_backtest:
        backtest_scheduler = BacktestScheduler(
            start_date=args.start_date,
            end_date=args.end_date,
            trading_calendar="NYSE",
            delay_between_days=0.5,
        )
        trading_dates = backtest_scheduler.get_trading_dates()

        async def scheduler_callback_fn(callback):
            await backtest_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn
    else:
        # Live mode: use daily or intraday scheduler with NYSE timezone
        live_scheduler = Scheduler(
            mode=runtime_config["schedule_mode"],
            trigger_time=runtime_config["trigger_time"],
            interval_minutes=runtime_config["interval_minutes"],
            config={"config_name": config_name},
        )

        async def scheduler_callback_fn(callback):
            await live_scheduler.start(callback)

        scheduler_callback = scheduler_callback_fn

    # Create gateway
    gateway = Gateway(
        market_service=market_service,
        storage_service=storage_service,
        pipeline=pipeline,
        scheduler_callback=scheduler_callback,
        config={
            "mode": args.mode,
            "backtest_mode": is_backtest,
            "tickers": tickers,
            "config_name": config_name,
            "schedule_mode": runtime_config["schedule_mode"],
            "interval_minutes": runtime_config["interval_minutes"],
            "trigger_time": runtime_config["trigger_time"],
            "initial_cash": initial_cash,
            "margin_requirement": margin_requirement,
            "max_comm_cycles": runtime_config["max_comm_cycles"],
            "enable_memory": runtime_config["enable_memory"],
        },
        scheduler=live_scheduler if not is_backtest else None,
    )

    if is_backtest:
        gateway.set_backtest_dates(trading_dates)

    # Start long-term memory contexts and run gateway
    async with AsyncExitStack() as stack:
        try:
            for memory in long_term_memories:
                await stack.enter_async_context(memory)
            await gateway.start(host=args.host, port=args.port)
        finally:
            # Persist long-term memories before cleanup
            for memory in long_term_memories:
                try:
                    if callable(getattr(memory, "save", None)):
                        await memory.save()
                except Exception as e:
                    logger.warning(f"Failed to persist memory: {e}")
            unregister_runtime_manager()
            clear_global_runtime_manager()


def build_arg_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the gateway runtime entrypoint."""
    parser = argparse.ArgumentParser(description="Trading System")
    parser.add_argument("--mode", choices=["live", "backtest"], default="live")
    parser.add_argument(
        "--config-name",
        default="default_run",
        help=(
            "Run label under runs/<config_name>; not a special root-level "
            "live/backtest/production directory."
        ),
    )
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8765)
    parser.add_argument(
        "--schedule-mode",
        choices=["daily", "intraday"],
        default="daily",
    )
    parser.add_argument("--trigger-time", default="09:30")  # NYSE market open
    parser.add_argument("--interval-minutes", type=int, default=60)
    parser.add_argument("--poll-interval", type=int, default=10)
    parser.add_argument("--start-date")
    parser.add_argument("--end-date")
    parser.add_argument(
        "--enable-memory",
        action="store_true",
        help="Enable ReMeTaskLongTermMemory for agents",
    )
    return parser


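# Example invocations (dates and run labels are illustrative):
#
#     python -m backend.main --mode live --config-name paper_run
#     python -m backend.main --mode backtest --config-name demo \
#         --start-date 2024-01-02 --end-date 2024-03-28

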
def main():
    """Main entry point"""
    parser = build_arg_parser()

    args = parser.parse_args()

    # Load config from env for logging
    runtime_config = _resolve_runtime_config(args)
    tickers = runtime_config["tickers"]
    initial_cash = runtime_config["initial_cash"]

    logger.info("=" * 60)
    logger.info(f"Mode: {args.mode}, Config: {args.config_name}")
    logger.info(f"Tickers: {tickers}")
    logger.info(f"Initial Cash: ${initial_cash:,.2f}")
    logger.info(
        "Long-term Memory: %s",
        "enabled" if runtime_config["enable_memory"] else "disabled",
    )
    if args.mode == "backtest":
        if not args.start_date or not args.end_date:
            parser.error(
                "--start-date and --end-date required for backtest mode",
            )
        logger.info(f"Backtest: {args.start_date} to {args.end_date}")
    logger.info("=" * 60)

    asyncio.run(run_with_gateway(args))


if __name__ == "__main__":
    main()

@@ -5,7 +5,7 @@
from fastapi.testclient import TestClient

from backend.apps.agent_service import create_app
from backend.api import agents as agents_module
from backend.api import runs as runs_module


def test_agent_service_routes_include_control_plane_endpoints(tmp_path):
@@ -73,10 +73,10 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
        def load_agent_file(self, config_name, agent_id, filename):
            return f"{config_name}:{agent_id}:{filename}"

    monkeypatch.setattr(agents_module, "load_agent_profiles", lambda: {"portfolio_manager": {"skills": ["demo_skill"]}})
    monkeypatch.setattr(agents_module, "get_agent_model_info", lambda agent_id: ("deepseek-v3.2", "DASHSCOPE"))
    monkeypatch.setattr(runs_module, "load_agent_profiles", lambda: {"portfolio_manager": {"skills": ["demo_skill"]}})
    monkeypatch.setattr(runs_module, "get_agent_model_info", lambda agent_id: ("deepseek-v3.2", "DASHSCOPE"))
    monkeypatch.setattr(
        agents_module,
        runs_module,
        "load_agent_workspace_config",
        lambda path: type(
            "Cfg",
@@ -91,20 +91,20 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
        )(),
    )
    monkeypatch.setattr(
        agents_module,
        runs_module,
        "get_bootstrap_config_for_run",
        lambda project_root, config_name: type("Bootstrap", (), {"agent_override": lambda self, agent_id: {}})(),
    )

    app = create_app(project_root=tmp_path)
    app.dependency_overrides[agents_module.get_skills_manager] = lambda: _FakeSkillsManager()
    app.dependency_overrides[agents_module.get_workspace_manager] = lambda: _FakeWorkspaceManager()
    app.dependency_overrides[runs_module.get_skills_manager] = lambda: _FakeSkillsManager()
    app.dependency_overrides[runs_module.get_workspace_manager] = lambda: _FakeWorkspaceManager()

    with TestClient(app) as client:
        profile = client.get("/api/workspaces/demo/agents/portfolio_manager/profile")
        skills = client.get("/api/workspaces/demo/agents/portfolio_manager/skills")
        detail = client.get("/api/workspaces/demo/agents/portfolio_manager/skills/demo_skill")
        workspace_file = client.get("/api/workspaces/demo/agents/portfolio_manager/files/MEMORY.md")
        profile = client.get("/api/runs/demo/agents/portfolio_manager/profile")
        skills = client.get("/api/runs/demo/agents/portfolio_manager/skills")
        detail = client.get("/api/runs/demo/agents/portfolio_manager/skills/demo_skill")
        workspace_file = client.get("/api/runs/demo/agents/portfolio_manager/files/MEMORY.md")

    assert profile.status_code == 200
    assert profile.json()["profile"]["model_name"] == "deepseek-v3.2"
@@ -118,4 +118,3 @@ def test_agent_service_read_routes(monkeypatch, tmp_path):
    assert workspace_file.status_code == 200
    assert workspace_file.json()["content"] == "demo:portfolio_manager:MEMORY.md"
    assert workspace_file.json()["scope_type"] == "runtime_run"
    assert "runs/<run_id>" in workspace_file.json()["scope_note"]

@@ -6,14 +6,14 @@ from pathlib import Path


def test_main_resolve_evo_agent_ids_filters_unsupported_roles(monkeypatch):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    monkeypatch.setenv(
        "EVO_AGENT_IDS",
        "fundamentals_analyst,portfolio_manager,unknown,technical_analyst",
    )

    resolved = main_module._resolve_evo_agent_ids()
    resolved = runner_module._resolve_evo_agent_ids()

    assert resolved == {"fundamentals_analyst", "portfolio_manager", "technical_analyst"}

@@ -29,7 +29,7 @@ def test_pipeline_runner_resolve_evo_agent_ids_keeps_supported_roles(monkeypatch


def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    created = {}

@@ -49,12 +49,12 @@ def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):
            self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "fundamentals_analyst")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "toolkit")
    monkeypatch.setattr(runner_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(runner_module, "create_agent_toolkit", lambda *args, **kwargs: "toolkit")

    agent = main_module._create_analyst_agent(
    agent = runner_module._create_analyst_agent(
        analyst_type="fundamentals_analyst",
        config_name="demo",
        run_id="demo",
        model="model",
        formatter="formatter",
        skills_manager=DummySkillsManager(),
@@ -71,7 +71,7 @@ def test_main_create_analyst_agent_can_build_evo_agent(monkeypatch, tmp_path):


def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    created = {}

@@ -91,11 +91,11 @@ def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):
            self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "risk_manager")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(main_module, "create_agent_toolkit", lambda *args, **kwargs: "risk-toolkit")
    monkeypatch.setattr(runner_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(runner_module, "create_agent_toolkit", lambda *args, **kwargs: "risk-toolkit")

    agent = main_module._create_risk_manager_agent(
        config_name="demo",
    agent = runner_module._create_risk_manager_agent(
        run_id="demo",
        model="model",
        formatter="formatter",
        skills_manager=DummySkillsManager(),
@@ -112,7 +112,7 @@ def test_main_create_risk_manager_can_build_evo_agent(monkeypatch, tmp_path):


def test_main_create_portfolio_manager_can_build_evo_agent(monkeypatch, tmp_path):
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module

    created = {}

@@ -132,15 +132,15 @@ def test_main_create_portfolio_manager_can_build_evo_agent(monkeypatch, tmp_path
            self.toolkit = None

    monkeypatch.setenv("EVO_AGENT_IDS", "portfolio_manager")
    monkeypatch.setattr(main_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(runner_module, "EvoAgent", DummyEvoAgent)
    monkeypatch.setattr(
        main_module,
        runner_module,
        "create_agent_toolkit",
        lambda *args, **kwargs: "pm-toolkit",
    )

    agent = main_module._create_portfolio_manager_agent(
        config_name="demo",
    agent = runner_module._create_portfolio_manager_agent(
        run_id="demo",
        model="model",
        formatter="formatter",
        initial_cash=12345.0,
@@ -372,13 +372,13 @@ def test_pipeline_create_runtime_analyst_uses_legacy_when_not_in_evo_ids(monkeyp

def test_main_resolve_evo_agent_ids_returns_all_by_default(monkeypatch):
    """Test that _resolve_evo_agent_ids returns all supported roles by default."""
    from backend import main as main_module
    from backend.core import pipeline_runner as runner_module
    from backend.config.constants import ANALYST_TYPES

    # Unset EVO_AGENT_IDS to test default behavior
    monkeypatch.delenv("EVO_AGENT_IDS", raising=False)

    resolved = main_module._resolve_evo_agent_ids()
    resolved = runner_module._resolve_evo_agent_ids()

    expected = set(ANALYST_TYPES) | {"risk_manager", "portfolio_manager"}
    assert resolved == expected

@@ -10,37 +10,12 @@ For the planned convergence work, see

## Contents

- [deploy/systemd/evotraders.service](./systemd/evotraders.service)
  - systemd unit for the long-running 大时代 gateway process
- [scripts/run_prod.sh](../scripts/run_prod.sh)
  - production launch script used by the systemd unit
- [deploy/nginx/bigtime.cillinn.com.conf](./nginx/bigtime.cillinn.com.conf)
  - HTTPS nginx config with WebSocket proxying
- [deploy/nginx/bigtime.cillinn.com.http.conf](./nginx/bigtime.cillinn.com.http.conf)
  - plain HTTP/static-site variant

## Deployment Topology Options

This directory documents two deployment topologies:

### 1. Compatibility Topology (backend.main) - CURRENT PRODUCTION DEFAULT

The checked-in production path uses the **compatibility gateway** (`backend.main`):

- nginx serves the built frontend from `/var/www/bigtime/current`
- public domain examples use `bigtime.cillinn.com`
- nginx proxies `/ws` to `127.0.0.1:8765`
- systemd runs `scripts/run_prod.sh`
- `scripts/run_prod.sh` starts `python3 -m backend.main` in live mode on `127.0.0.1:8765`

This is a **monolithic gateway** that embeds all services internally. It is the
current production default for simplicity but does not expose the split FastAPI
services directly.

**When to use**: Single-server deployments, simpler operational requirements,
backwards compatibility with existing monitoring.

### 2. Preferred Topology (Split Services) - RECOMMENDED FOR NEW DEPLOYMENTS
## Recommended Topology

The modern architecture exposes individual FastAPI services:

@@ -76,27 +51,9 @@ To deploy in split-service mode, you would:

## systemd

The current systemd unit:

- uses `WorkingDirectory=/root/code/evotraders`
- executes [scripts/run_prod.sh](../scripts/run_prod.sh)
- restarts automatically on failure

Enable and start:

```bash
sudo cp deploy/systemd/evotraders.service /etc/systemd/system/evotraders.service
sudo systemctl daemon-reload
sudo systemctl enable evotraders
sudo systemctl start evotraders
```

Check status and logs:

```bash
sudo systemctl status evotraders
journalctl -u evotraders -f
```
No maintained systemd unit is checked into the repository anymore. If deployment
work resumes, add units that mirror the split-service topology used in local
development.

## nginx

@@ -124,7 +81,7 @@ The checked-in TLS config expects Let's Encrypt assets at:
Before using the production scripts, ensure the runtime environment has:

- a usable Python environment
- backend dependencies installed from `requirements.txt`
- backend dependencies installed from the checked-in Python package metadata in `pyproject.toml`
- the package installed with `pip install -e .` or `uv pip install -e .`
- frontend dependencies installed with `npm ci`
- repo dependencies installed
@@ -136,30 +93,10 @@ Recommended production install sequence:

```bash
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
pip install -e .
cd frontend && npm ci && npm run build && cd ..
```

The production script currently sets:

```bash
PYTHONPATH=/root/code/evotraders/.pydeps:.
TICKERS=${TICKERS:-AAPL,MSFT,GOOGL,AMZN,NVDA,META,TSLA,AMD,NFLX,AVGO,PLTR,COIN}
```

It then launches the current compatibility gateway/runtime process:

```bash
python3 -m backend.main \
    --mode live \
    --config-name production \
    --host 127.0.0.1 \
    --port 8765 \
    --trigger-time now \
    --poll-interval 15
```

## Skill Sandbox Configuration

Production deployments should enable the Docker-based skill sandbox for security isolation:
@@ -172,7 +109,7 @@ pip install -e ".[docker-sandbox]"
docker info
```
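As a hedged illustration of how a consumer might read these knobs, the sketch
below mirrors the defaults exported by the (now-removed) `scripts/run_prod.sh`;
treating them as the sandbox runner's own defaults is an assumption:

```python
import os

# Sandbox configuration knobs, with defaults mirroring scripts/run_prod.sh.
SANDBOX_CONFIG = {
    "mode": os.getenv("SKILL_SANDBOX_MODE", "docker"),
    "image": os.getenv("SKILL_SANDBOX_IMAGE", "python:3.11-slim"),
    "memory_limit": os.getenv("SKILL_SANDBOX_MEMORY_LIMIT", "512m"),
    "cpu_limit": float(os.getenv("SKILL_SANDBOX_CPU_LIMIT", "1.0")),
    "network": os.getenv("SKILL_SANDBOX_NETWORK", "none"),
    "timeout_seconds": int(os.getenv("SKILL_SANDBOX_TIMEOUT", "60")),
}
```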
Environment variables (set by `scripts/run_prod.sh` with defaults):
Example environment variables for a future deployment:

| Variable | Default | Description |
|----------|---------|-------------|
@@ -196,6 +133,5 @@ The checked-in deployment artifacts do not currently document or automate:
- frontend build/publish steps
- secret management

If you move production fully to split-service mode, update this directory so it
documents the new service topology explicitly instead of relying on the gateway-
only path.
If deployment returns to active development, rewrite this directory around the
same split-service topology used by `start-dev.sh`.

@@ -1,22 +0,0 @@
[Unit]
Description=大时代 Production Service
After=network.target
# COMPATIBILITY_SURFACE: stable
# OWNER: ops-team
# SEE: docs/legacy-inventory.md#gateway-first-production-example
#
# This systemd unit runs the gateway-first production topology.
# It executes scripts/run_prod.sh which launches backend.main as the
# primary gateway/runtime process. For split-service deployment topology,
# see docs/current-architecture.md and deploy/README.md

[Service]
Type=simple
WorkingDirectory=/root/code/evotraders
ExecStart=/root/code/evotraders/scripts/run_prod.sh
Restart=always
RestartSec=5
Environment=PYTHONUNBUFFERED=1

[Install]
WantedBy=multi-user.target
29
docs/README.md
Normal file
@@ -0,0 +1,29 @@
# Docs Index

This directory keeps only the documents that are still worth maintaining.
Suggested reading order: start with the authoritative facts, then the
compatibility cleanup notes, then the planning notes.

## Authoritative Facts

- [`current-architecture.md`](./current-architecture.md) — current project architecture, runtime paths, and source-of-truth definitions
- [`project-layout.md`](./project-layout.md) — responsibilities of the current repository directories and how to read them
- [`terminology.md`](./terminology.md) — core terms such as runtime, run, and workspace
- [`runtime-api-changes.md`](./runtime-api-changes.md) — current `runtime_service` API conventions and behavioral notes
- [`current-architecture.excalidraw`](./current-architecture.excalidraw) — source file for the architecture diagram

## Compatibility and Legacy Cleanup

- [`legacy-inventory.md`](./legacy-inventory.md) — inventory of compatibility layers, legacy code, and historical boundaries
- [`compat-removal-plan.md`](./compat-removal-plan.md) — compatibility-removal plan and convergence direction

## Planning Notes

- [`development-roadmap.md`](./development-roadmap.md) — future evolution direction and staged roadmap

## Usage Principles

- For "how the system works today", read `current-architecture.md` first
- For "how to understand the top-level directories", read `project-layout.md` first
- For "why a given old entry point still exists", read `legacy-inventory.md` first
- For "which compatibility paths are slated for removal", read `compat-removal-plan.md` first
- Do not treat historical plans or point-in-time fix records as the current source of truth
@@ -67,11 +67,11 @@ backward-compatible behavior while migration settles.

## Remaining Migration Risks

### Split service deployment is not yet the checked-in production default
### Checked-in deployment artifacts still lag the development topology

- The repo documents split-service local development clearly.
- The checked-in production example still centers on `backend.main` and nginx
  WebSocket proxying.
- The checked-in deployment docs still center on historical nginx
  WebSocket proxying rather than the active dev topology.
- This is a topology mismatch to keep in mind when changing deploy docs or prod
  automation.

@@ -93,13 +93,13 @@ backward-compatible behavior while migration settles.
Migration can be considered effectively complete when all of the following are
true:

1. Production deployment docs and scripts explicitly run the same split-service
   topology used in development, or intentionally document a different stable
   production topology.
1. Deployment docs and scripts explicitly run the same split-service
   topology used in development, or are removed from the repo.
2. Critical read paths no longer require ambiguous fallback behavior to local
   module implementations.
3. OpenClaw integration is documented as a stable contract with clear guidance
   on when to use the WebSocket gateway versus the REST surface.
   on when to use the WebSocket gateway versus the small set of CLI-backed
   gateway read helpers.
4. The frontend-service routing model is stable enough that direct-service and
   gateway-mediated paths are deliberate design choices rather than migration
   leftovers.
@@ -137,9 +137,6 @@ Recommended next action:
These still have an operational reason to exist and should be documented rather
than treated as accidental leftovers.

- `backend.main`
  - compatibility gateway/runtime process
  - still relevant for websocket transport and current deploy topology
- `runs/<run_id>/team_dashboard/*.json`
  - export/consumer compatibility layer
- gateway-mediated websocket/event flow
@@ -147,8 +144,8 @@ than treated as accidental leftovers.

Recommended next action:

- keep these, but document them as intentional compatibility surfaces with
  explicit ownership.
- keep only surfaces with an active operational consumer, and avoid routing new
  development through them.

### 3. Defer Until Topology Decisions Are Final

@@ -157,8 +154,8 @@ churn without simplifying the current runtime.

- `workspaces/` design-time registry versus `runs/<run_id>/` runtime state
- env-dependent service fallback behavior
- checked-in deployment docs centered on `backend.main`
- dual OpenClaw shapes: gateway integration and REST facade
- checked-in deployment docs that have not yet been rewritten around split services
- dual OpenClaw access patterns: gateway integration and CLI-backed read helpers

Recommended next action:

@@ -386,13 +386,13 @@
    "updated": 1,
    "link": null,
    "locked": false,
    "text": "Gateway :8765\\nbackend.main\\nWebSocket + runtime orchestration",
    "text": "Gateway :8765\\nGateway process\\nWebSocket + runtime orchestration",
    "fontSize": 18,
    "fontFamily": 5,
    "textAlign": "center",
    "verticalAlign": "middle",
    "containerId": null,
    "originalText": "Gateway :8765\nbackend.main\nWebSocket + runtime orchestration",
    "originalText": "Gateway :8765\nGateway process\nWebSocket + runtime orchestration",
    "lineHeight": 1.2
  },
  {
@@ -696,13 +696,13 @@
    "updated": 1,
    "link": null,
    "locked": false,
    "text": "OpenClaw WS :18789\\noptional REST :8004",
    "text": "OpenClaw WS :18789\\nCLI-backed reads via gateway",
    "fontSize": 20,
    "fontFamily": 5,
    "textAlign": "center",
    "verticalAlign": "middle",
    "containerId": null,
    "originalText": "OpenClaw WS :18789\noptional REST :8004",
    "originalText": "OpenClaw WS :18789\nCLI-backed reads via gateway",
    "lineHeight": 1.2
  },
  {

@@ -1,8 +1,8 @@
# Current Architecture

This file describes the current code-supported architecture only. Historical
paths and partial migrations are intentionally excluded unless called out as
legacy compatibility.
paths and partial migrations are intentionally excluded unless brief historical
context is needed to explain the current shape.

Reference material:

@@ -11,40 +11,21 @@ Reference material:
- legacy inventory: [legacy-inventory.md](./legacy-inventory.md)
- terminology guide: [terminology.md](./terminology.md)

## Runtime Modes
## Runtime Mode

The system supports two distinct runtime modes:
The supported runtime model is the split-service development architecture.

### Standalone Mode (Legacy Compatibility)

Direct Gateway startup via `backend.main` as a monolithic entrypoint.

```bash
python -m backend.main --mode live --port 8765
```

**Characteristics:**
- Single process runs Gateway, Pipeline, Market Service, and Scheduler
- No service discovery or process management
- Suitable for single-node deployments and quick testing
- All components share the same memory space

**Use cases:**
- Quick local testing without service orchestration
- Single-node production deployments
- Backward compatibility with legacy startup scripts

### Microservice Mode (Default for Development)

Split-service architecture with dedicated runtime_service managing the Gateway lifecycle.
Split-service architecture with a dedicated runtime API surface and a separate
Gateway process.

```bash
./start-dev.sh  # Starts all services including runtime_service and Gateway
```

**Characteristics:**
- `runtime_service` (:8003) acts as Gateway Process Manager
- Gateway runs as a subprocess managed by runtime_service
- `runtime_service` (:8003) provides runtime lifecycle APIs
- the checked-in `start-dev.sh` starts split services and lets `runtime_service` spawn Gateway
- manual split-service flows can also let `runtime_service` spawn Gateway
- Clear separation between Control Plane (runtime_service) and Data Plane (Gateway)
- Service discovery via environment variables (see the sketch after this list)
- Independent scaling and deployment of each service
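A minimal sketch of that env-based discovery, assuming the
`TRADING_SERVICE_URL` / `NEWS_SERVICE_URL` variable names used elsewhere in
these docs (`RUNTIME_SERVICE_URL` is illustrative), with the documented local
ports as fallbacks:

```python
import os

# Resolve split-service base URLs from the environment, falling back to the
# documented local development ports.
TRADING_SERVICE_URL = os.getenv("TRADING_SERVICE_URL", "http://127.0.0.1:8001")
NEWS_SERVICE_URL = os.getenv("NEWS_SERVICE_URL", "http://127.0.0.1:8002")
RUNTIME_SERVICE_URL = os.getenv("RUNTIME_SERVICE_URL", "http://127.0.0.1:8003")
```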
@@ -54,20 +35,7 @@ Split-service architecture with dedicated runtime_service managing the Gateway l
- Multi-node deployments
- Production environments requiring service isolation

## Mode Comparison

| Aspect | Standalone Mode | Microservice Mode |
|--------|-----------------|-------------------|
| **Entry point** | `python -m backend.main` | `./start-dev.sh` or individual services |
| **Process model** | Single monolithic process | Multiple specialized processes |
| **Gateway management** | Self-contained | Managed by runtime_service |
| **Service discovery** | None (in-process) | Environment variable based |
| **Hot reload** | Full restart required | Per-service reload |
| **Scaling** | Vertical only | Horizontal possible |
| **Complexity** | Lower | Higher |
| **Use case** | Testing, simple deployments | Development, production |

## Default Runtime Shape (Microservice Mode)
## Default Runtime Shape

The active runtime path is:

@@ -83,7 +51,6 @@ Current service surfaces:
  - read-only explain/news APIs
- `backend.apps.runtime_service` on `:8003`
  - runtime lifecycle and gateway process management
  - optional OpenClaw REST facade
- gateway WebSocket on `:8765`
  - live feed/event transport and pipeline coordination

@@ -163,11 +130,11 @@ use `docker` mode with appropriate resource limits.

### Current State

The system is in a transitional state:
The system is in an active development state:

1. **Microservice infrastructure is operational** - runtime_service can start/stop Gateway as a subprocess
2. **Pipeline logic remains in Gateway** - full Pipeline execution still happens within the Gateway process
3. **Standalone mode is preserved** - direct `backend.main` startup for compatibility
3. **Direct gateway startup has been removed** - the repository now exposes a single supported startup model

### Future Direction

@@ -184,9 +151,9 @@ Phase 3: Pipeline decomposition (planned)
- Gateway becomes a thin event router
- runtime_service evolves into a full orchestrator

Phase 4: Standalone mode deprecation (future)
- Remove direct `backend.main` entry point
- All deployments use microservice mode
Phase 4: Deployment convergence (future)
- Remove or rewrite historical deployment artifacts
- Keep all documented startup paths aligned with `runtime_service`

## Legacy Compatibility

@@ -194,8 +161,8 @@ These items still exist, but they are not the recommended source of truth for
new development:

- root-level runtime data directories such as `live/`, `production/`, `backtest/`
- direct `backend.main` startup as the primary development path
- historical documentation gaps that have not yet been fully rewritten

The current runtime still creates legacy `AnalystAgent` / `RiskAgent` /
`PMAgent` instances directly. EvoAgent remains an in-progress migration target,
not the default execution path.
Legacy fallback agent paths still exist in compatibility-oriented creation
flows, but the default `TradingPipeline` runtime now prefers `EvoAgent` for the
supported roles unless rollout settings explicitly reduce that set.

@@ -63,14 +63,14 @@ Goal: preserve only intentional compatibility layers.

Planned work:

- identify startup scripts and deploy artifacts that still center on
  `backend.main` as a monolithic entrypoint
- identify any remaining deployment docs that still lag the split-service topology
- classify compatibility surfaces into:
  - stable and intentional
  - temporary and shrinking
  - removable once replacements are fully active
- reduce env-dependent fallback ambiguity for read-only service routing where practical
- document the difference between OpenClaw WebSocket integration and the optional REST facade
- document the difference between OpenClaw WebSocket integration and the small
  set of CLI-backed gateway read helpers

Definition of done:

@@ -21,7 +21,7 @@ These are the current defaults to build against:
- `backend.apps.trading_service` on `:8001`
- `backend.apps.news_service` on `:8002`
- gateway process
  - `backend.main`
  - `backend.gateway_server` in the default managed-runtime path
  - `backend.services.gateway` on `:8765`

## Compatibility Surface Classification

@@ -35,7 +35,6 @@ compatibility surfaces with explicit ownership.

| Surface | Location | Owner | Reason |
|---------|----------|-------|--------|
| Gateway-first production | `scripts/run_prod.sh`, `deploy/systemd/`, `deploy/nginx/` | ops-team | Current production example runs gateway directly and proxies `/ws` |
| Dashboard export layer | `runs/<run_id>/team_dashboard/*.json` | frontend-team | Downstream dashboard consumers read these exports |
| Design-time workspace registry | `workspaces/`, `backend.api.workspaces` | control-plane-team | Control-plane editing and registry-style management |
| Gateway WebSocket transport | `backend.services.gateway` on `:8765` | runtime-team | Live event streaming contract for frontend |
@@ -50,8 +49,8 @@ in use.

| Surface | Location | Replacement | ETA |
|---------|----------|-------------|-----|
| Legacy analyst agents | `backend.agents.analyst.*` | `EvoAgent` | After EvoAgent smoke tests pass |
| Mixed workspace_id semantics | `/api/workspaces/{id}/agents/...` | ✅ `/api/runs/{run_id}/agents/...` routes added | Completed |
| Legacy agent fallback paths | compatibility constructors in `backend.core.pipeline_runner` | `EvoAgent` | After fallback-free runtime cutover |
| Mixed workspace_id semantics | removed from runtime agent routes | ✅ `/api/runs/{run_id}/agents/...` is the only runtime agent route | Completed |
| Root-level runtime directories | `live/`, `backtest/`, `production/` | `runs/<run_id>/` | ✅ Removed, backed up to runs/_legacy/ |

**Status**: Do not add new code using these surfaces. Migrate existing usage
@@ -67,43 +66,13 @@ topology and service-routing policy are frozen.
|---------|---------------|-----------------|
| OpenClaw integration | Gateway WebSocket (port 18789) | Stable — REST facade removed |
| Env-dependent service fallbacks | `TRADING_SERVICE_URL`, `NEWS_SERVICE_URL` fallbacks to local modules | Remove fallbacks and require explicit URLs? |
| Split-service production deploy | Docs show gateway-first, dev uses split-service | Align production with dev topology? |
| Split-service deployment docs | Deployment docs are still partial compared with the dev topology | Rewrite deploy docs around split services? |

**Status**: Document current behavior. Do not actively remove until topology
decisions are finalized.

## Detailed Surface Documentation

### Gateway-First Production Example

**Files**:
- `scripts/run_prod.sh` - Production launch script
- `deploy/systemd/evotraders.service` - systemd unit
- `deploy/nginx/bigtime.cillinn.com.conf` - HTTPS + WebSocket proxy
- `deploy/nginx/bigtime.cillinn.com.http.conf` - HTTP variant

**Behavior**:
```bash
# scripts/run_prod.sh launches:
python3 -m backend.main \
    --mode live \
    --config-name production \
    --host 127.0.0.1 \
    --port 8765
```

**nginx proxies**:
- `/ws` -> `127.0.0.1:8765` (WebSocket upgrade)
- `/` -> static files in `/var/www/bigtime/current`

**Why this exists**:
- Simpler production deployment (single process + nginx)
- WebSocket is the practical live event contract for frontend
- Split-service topology adds operational complexity not needed for all deployments

**Ownership**: ops-team
**Status**: Stable and intentional

### Dashboard Export Layer

**Files**: `runs/<run_id>/team_dashboard/*.json`
@@ -154,19 +123,14 @@ These remain in the tree, but they should not define the architecture for new wo
- treat these as historical or compatibility-oriented data/layout artifacts
- do not use them as the default runtime contract for new features

### Mixed `workspace_id` semantics on agent routes
### Historical mixed `workspace_id` semantics on agent routes

- `/api/workspaces/{workspace_id}/agents/...`
This compatibility shape has been removed from runtime agent routes.

**Read**:
**Current rule**:

- design-time CRUD routes use `workspace_id` as a registry workspace id
- profile, skills, and editable file routes use `workspace_id` as a run id

**Mitigation already in repo**:

- `agent_service /api/status` exposes scope metadata
- runtime-read responses expose `scope_type` and `scope_note`
- design-time CRUD routes use `/api/workspaces/{workspace_id}/agents/...`
- runtime agent assets use `/api/runs/{run_id}/agents/...`

### Partial EvoAgent rollout

@@ -175,8 +139,10 @@ These remain in the tree, but they should not define the architecture for new wo

**Read**:

- EvoAgent is still a controlled rollout path
- legacy analyst/risk/PM implementations remain the default runtime path for now
- EvoAgent is the default selection path for supported roles in the current
  pipeline
- legacy implementations remain as compatibility fallbacks in selected startup
  and runner paths

## Recommended Usage

@@ -185,13 +151,14 @@ When in doubt:

1. trust `docs/current-architecture.md`
2. trust `runs/<run_id>/` over root-level runtime directories
3. treat `workspaces/` as control-plane registry, not runtime truth
4. treat deploy artifacts as the current checked-in example, not the full system contract
4. treat deploy artifacts as partial references, not the full system contract
5. check this file's **Compatibility Surface Classification** before assuming something is legacy
6. prefer `runtime_service`-managed startup for all new work

## Change Log

| Date | Change |
|------|--------|
| 2026-03-31 | Added Compatibility Surface Classification (3 buckets) |
| 2026-03-31 | Documented OpenClaw dual integration (REST vs WebSocket) |
| 2026-03-31 | Clarified OpenClaw integration documentation |
| 2026-03-31 | Added ownership and status to all surfaces |

80
docs/project-layout.md
Normal file
@@ -0,0 +1,80 @@
# Project Layout

This file only describes the responsibilities of the current repository
directories; it does not record the history of past migrations.

If you want to know how the system works today, read `current-architecture.md` first.
If you want to know how a directory should be understood today, read this file.

## Top-Level Directories

### Mainline Code

- `backend/`
  - backend runtime, services, APIs, pipeline, agents, tools, and tests
- `frontend/`
  - React/Vite frontend
- `shared/`
  - schemas and client wrappers shared between frontend and backend

### Active Data and State Directories

- `runs/`
  - runtime source of truth
  - each `run_id` holds BOOTSTRAP, agent assets, state, logs, and dashboard exports
- `workspaces/`
  - design-time registry
  - used for control-plane CRUD; not the default runtime execution directory
- `data/`
  - data assets and local data files used by the project

### Documentation and Deployment

- `docs/`
  - still-maintained architecture, compatibility-boundary, roadmap, and layout docs
- `services/`
  - service boundary notes
- `deploy/`
  - deployment examples, systemd, and nginx configs
- `scripts/`
  - startup, check, smoke-test, and helper scripts

### Project Entry Points and Metadata

- `README.md`
  - main English readme
- `README_zh.md`
  - main Chinese readme
- `pyproject.toml`
  - Python project metadata and dependencies
- `start-dev.sh`
  - default local development entry point
- `start.sh`
  - more production-flavored local startup entry point

## Local Environment Directories

These directories are usually only meaningful on the current machine and should
not be treated as architectural sources of truth:

- `.venv/`
  - local Python virtual environment
- `.pydeps/`
  - local dependency staging directory
- `.omc/`
  - local tooling state
- `.codex/`
  - local agent/tooling state

## Reading Principles

- For runtime behavior, start from `backend/`, `frontend/`, and `runs/`
- For control-plane editing and design-time assets, look at `workspaces/` first
- For service boundaries, look at `services/README.md` first
- Do not treat local environment directories as part of the project structure
- Before adding a directory, decide whether it is mainline code, runtime data, docs/deployment, or local environment

## Current Constraints

- No new reference repos, mirror repos, or sample-snapshot directories at the top level
- No more committing test output, PID files, build artifacts, or cache directories
- New runtime-related files go under `runs/<run_id>/` first
- New design-time registry files go under `workspaces/` first
@@ -133,10 +133,7 @@ function buildRunScopedAgentPath(runId, agentId, suffix = '') {
  return `/runs/${encodeURIComponent(runId)}/agents/${encodeURIComponent(agentId)}${suffix}`;
}

/**
 * Runtime agent routes use `/runs/{run_id}/agents/...`.
 * Legacy `/workspaces/...` routes are deprecated but remain for backward compatibility.
 */
/** Runtime agent routes use `/runs/{run_id}/agents/...`. */
export function fetchAgentProfile(runId, agentId) {
  return safeFetch(CONTROL_API_BASE, buildRunScopedAgentPath(runId, agentId, '/profile'));
}

@@ -20,7 +20,7 @@ describe('runtimeApi run-scoped agent routes', () => {
    await fetchAgentProfile('20260330_123000', 'portfolio_manager');

    expect(fetchMock).toHaveBeenCalledWith(
      expect.stringContaining('/workspaces/20260330_123000/agents/portfolio_manager/profile')
      expect.stringContaining('/runs/20260330_123000/agents/portfolio_manager/profile')
    );
  });

@@ -35,7 +35,7 @@ describe('runtimeApi run-scoped agent routes', () => {
    await updateAgentWorkspaceFile('20260330_123000', 'risk_manager', 'MEMORY.md', '# demo');

    expect(fetchMock).toHaveBeenCalledWith(
      expect.stringContaining('/workspaces/20260330_123000/agents/risk_manager/files/MEMORY.md'),
      expect.stringContaining('/runs/20260330_123000/agents/risk_manager/files/MEMORY.md'),
      expect.objectContaining({
        method: 'PUT',
        body: '# demo'

@@ -1,4 +0,0 @@
{
  "status": "failed",
  "failedTests": []
}
@@ -1,34 +0,0 @@
#!/usr/bin/env bash
# COMPATIBILITY_SURFACE: stable
# OWNER: ops-team
# SEE: docs/legacy-inventory.md#gateway-first-production-example
#
# Gateway-first production launch script.
# This is the current checked-in production example, running the gateway
# directly and proxying /ws instead of exposing every split FastAPI service.
# For split-service topology, see start-dev.sh and docs/current-architecture.md
set -euo pipefail

cd /root/code/evotraders

export PYTHONPATH=/root/code/evotraders/.pydeps:.
export TICKERS="${TICKERS:-AAPL,MSFT,GOOGL,AMZN,NVDA,META,TSLA,AMD,NFLX,AVGO,PLTR,COIN}"

# Skill sandbox configuration (docker mode is recommended in production)
export SKILL_SANDBOX_MODE="${SKILL_SANDBOX_MODE:-docker}"
export SKILL_SANDBOX_IMAGE="${SKILL_SANDBOX_IMAGE:-python:3.11-slim}"
export SKILL_SANDBOX_MEMORY_LIMIT="${SKILL_SANDBOX_MEMORY_LIMIT:-512m}"
export SKILL_SANDBOX_CPU_LIMIT="${SKILL_SANDBOX_CPU_LIMIT:-1.0}"
export SKILL_SANDBOX_NETWORK="${SKILL_SANDBOX_NETWORK:-none}"
export SKILL_SANDBOX_TIMEOUT="${SKILL_SANDBOX_TIMEOUT:-60}"

# "production" here is an explicit deployment run label, not a required
# root-level runtime directory name.

exec python3 -m backend.main \
    --mode live \
    --config-name production \
    --host 127.0.0.1 \
    --port 8765 \
    --trigger-time now \
    --poll-interval 15
@@ -1,168 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Verify documentation and script consistency.
|
||||
|
||||
This script checks that:
|
||||
1. README.md mentions correct service ports
|
||||
2. start-dev.sh starts services on documented ports
|
||||
3. deploy/README.md is consistent with production scripts
|
||||
4. Service ports match across all documentation
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]

# Expected service ports (source of truth)
SERVICE_PORTS = {
    "agent_service": 8000,
    "trading_service": 8001,
    "news_service": 8002,
    "runtime_service": 8003,
    "gateway_websocket": 8765,
}


def check_readme_ports() -> list[str]:
    """Check that README.md documents the correct ports."""
    errors = []
    readme_path = PROJECT_ROOT / "README.md"
    readme_content = readme_path.read_text(encoding="utf-8")

    # Check that each service port is mentioned
    for service, port in SERVICE_PORTS.items():
        port_patterns = [
            f":{port}",
            f"port {port}",
            f"localhost:{port}",
        ]
        found = any(pattern in readme_content for pattern in port_patterns)
        if not found:
            errors.append(f"README.md: Missing documentation for {service} on port {port}")

    return errors


def check_start_dev_sh_ports() -> list[str]:
    """Check that start-dev.sh uses the correct ports."""
    errors = []
    script_path = PROJECT_ROOT / "start-dev.sh"
    script_content = script_path.read_text(encoding="utf-8")

    # Check for port declarations in start_service calls
    for service, port in SERVICE_PORTS.items():
        if service == "gateway_websocket":
            # Gateway uses the --port flag
            if f"--port {port}" not in script_content:
                errors.append(f"start-dev.sh: Gateway not using port {port}")
        else:
            # Services pass the port as a start_service argument
            pattern = rf'start_service\s+"{service}"\s+"[^"]+"\s+{port}'
            if not re.search(pattern, script_content):
                # Also accept explicit port mentions
                if f"port {port}" not in script_content and f":{port}" not in script_content:
                    errors.append(f"start-dev.sh: {service} not using port {port}")

    return errors


def check_deploy_readme_consistency() -> list[str]:
    """Check that deploy/README.md is consistent with the scripts."""
    errors = []
    deploy_readme_path = PROJECT_ROOT / "deploy" / "README.md"
    deploy_content = deploy_readme_path.read_text(encoding="utf-8")

    # Check for gateway port consistency
    if "127.0.0.1:8765" not in deploy_content:
        errors.append("deploy/README.md: Gateway port 8765 not documented correctly")

    # Check for the production script reference
    if "scripts/run_prod.sh" not in deploy_content:
        errors.append("deploy/README.md: Missing reference to scripts/run_prod.sh")

    return errors


def check_run_prod_sh_ports() -> list[str]:
    """Check that run_prod.sh uses the correct ports."""
    errors = []
    script_path = PROJECT_ROOT / "scripts" / "run_prod.sh"
    script_content = script_path.read_text(encoding="utf-8")

    # The production script should use port 8765 for the gateway
    if "--port 8765" not in script_content:
        errors.append("scripts/run_prod.sh: Not using gateway port 8765")

    return errors


def check_service_main_blocks() -> list[str]:
    """Check that service modules use the correct ports in their __main__ blocks."""
    errors = []

    service_files = {
        "agent_service": PROJECT_ROOT / "backend" / "apps" / "agent_service.py",
        "trading_service": PROJECT_ROOT / "backend" / "apps" / "trading_service.py",
        "news_service": PROJECT_ROOT / "backend" / "apps" / "news_service.py",
        "runtime_service": PROJECT_ROOT / "backend" / "apps" / "runtime_service.py",
    }

    for service, file_path in service_files.items():
        if not file_path.exists():
            errors.append(f"{service}: File not found at {file_path}")
            continue

        content = file_path.read_text(encoding="utf-8")
        expected_port = SERVICE_PORTS[service]

        # Check for port= in uvicorn.run or app.run
        if f"port={expected_port}" not in content and f"port= {expected_port}" not in content:
            errors.append(f"{file_path}: Not using expected port {expected_port}")

    return errors


def main() -> int:
    parser = argparse.ArgumentParser(
        description="Verify documentation and script consistency.",
    )
    parser.add_argument(
        "--strict",
        action="store_true",
        help="Exit non-zero when inconsistencies are found",
    )
    args = parser.parse_args()

    all_errors = []

    print("Checking README.md ports...")
    all_errors.extend(check_readme_ports())

    print("Checking start-dev.sh ports...")
    all_errors.extend(check_start_dev_sh_ports())

    print("Checking deploy/README.md consistency...")
    all_errors.extend(check_deploy_readme_consistency())

    print("Checking scripts/run_prod.sh ports...")
    all_errors.extend(check_run_prod_sh_ports())

    print("Checking service __main__ blocks...")
    all_errors.extend(check_service_main_blocks())

    if all_errors:
        print("\nConsistency errors found:")
        for error in all_errors:
            print(f"  - {error}")
        return 1 if args.strict else 0
    else:
        print("\nAll consistency checks passed!")
        return 0


if __name__ == "__main__":
    raise SystemExit(main())
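In CI, the checker can run in strict mode so documentation drift fails the build; without `--strict` it is informational only. A minimal invocation sketch — the script path is an assumption, since the filename is not shown in this diff:

```bash
# Hypothetical path — adjust to wherever this checker lives in the repo.
python scripts/check_docs_consistency.py           # report only, always exits 0
python scripts/check_docs_consistency.py --strict  # exits 1 if inconsistencies are found
```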
@@ -20,31 +20,19 @@ and the next-step execution plan lives at
| `backend.apps.trading_service` | `8001` | Read-only trading data APIs such as prices, financials, insider trades |
| `backend.apps.news_service` | `8002` | Read-only explain/news APIs such as story, similar days, range explain |
| `backend.apps.runtime_service` | `8003` | Runtime lifecycle APIs under `/api/runtime/*` |
| Gateway (`backend.main`) | `8765` | WebSocket feed, runtime event stream, pipeline execution |
| Gateway process | `8765` | WebSocket feed, runtime event stream, pipeline execution |
| OpenClaw Gateway | `18789` | External OpenClaw WebSocket endpoint consumed by the 大时代 gateway |

## Runtime Modes
## Runtime Mode

### Standalone Mode (Direct Gateway Startup)

For simple deployments or backward compatibility:

```bash
python -m backend.main --mode live --host 0.0.0.0 --port 8765
```

In this mode, the Gateway runs as the primary process with all components
(Pipeline, Market Service, Scheduler) loaded in-process.

### Microservice Mode (Recommended)

For development and production with service isolation:
For development and service isolation:

```bash
./start-dev.sh
```

This starts all services with `runtime_service` managing the Gateway lifecycle.
Today this script starts the split services and then relies on
`runtime_service` to spawn the Gateway during startup. A quick health-check
sketch follows.
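One way to confirm the split services came up — `runtime_service` exposes `/health` on `8003` (start-dev.sh polls it during startup); the matching endpoints on the other services are assumed here, not confirmed by this doc:

```bash
# Polled by start-dev.sh itself during startup
curl -fsS http://127.0.0.1:8003/health   # runtime_service
# The following /health endpoints are assumed by analogy
curl -fsS http://127.0.0.1:8000/health   # agent_service
curl -fsS http://127.0.0.1:8001/health   # trading_service
curl -fsS http://127.0.0.1:8002/health   # news_service
```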

## What Runs By Default In Dev

@@ -60,11 +48,9 @@ That script starts:
- `trading_service` on `8001`
- `news_service` on `8002`
- `runtime_service` on `8003`
- 大时代 gateway on `8765` (as subprocess of runtime_service)
- 大时代 gateway on `8765` (spawned by `runtime_service`)

It does **not** start the OpenClaw gateway.

Instead, the gateway expects an OpenClaw WebSocket server to already be
It also expects an OpenClaw WebSocket server to already be
available at `ws://localhost:18789` unless you override the OpenClaw gateway
configuration outside the script.

@@ -79,8 +65,9 @@ python -m uvicorn backend.apps.news_service:app --host 0.0.0.0 --port 8002 --reload
python -m uvicorn backend.apps.runtime_service:app --host 0.0.0.0 --port 8003 --reload
```

The Gateway is started by `runtime_service` via the `/api/runtime/start` API,
not manually. To start a runtime:
If you launch only the split FastAPI surfaces manually, `runtime_service` can
start the Gateway via the `/api/runtime/start` API. To start a runtime in that
fully managed flow:

```bash
curl -X POST http://localhost:8003/api/runtime/start \
@@ -94,11 +81,6 @@ curl -X POST http://localhost:8003/api/runtime/start \
}'
```
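For reference, a minimal complete request — the field names below mirror the defaults that `start-dev.sh` posts to this same endpoint; the values are illustrative:

```bash
curl -X POST http://localhost:8003/api/runtime/start \
  -H 'Content-Type: application/json' \
  -d '{
        "launch_mode": "fresh",
        "tickers": ["AAPL", "MSFT"],
        "schedule_mode": "daily",
        "trigger_time": "09:30",
        "initial_cash": 100000.0,
        "mode": "live"
      }'
```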

Optional OpenClaw REST surface:

```bash
```

## Runtime Responsibilities

The runtime path is intentionally split:

@@ -159,7 +141,7 @@ architecture. Its responsibilities:
```
runtime_service (:8003)
  |
  |-- spawns --> Gateway subprocess (:8765)
  |-- can spawn --> Gateway subprocess (:8765)
  |
  |-- TradingPipeline
  |-- MarketService
@@ -167,8 +149,8 @@ runtime_service (:8003)
  |-- WebSocket server
```

The Gateway subprocess runs the `backend.gateway_server` module (not `backend.main`)
with run-specific configuration passed via CLI arguments.
The Gateway subprocess runs `backend.gateway_server` with run-specific
configuration passed by `runtime_service`.

## Environment Variables

@@ -221,13 +203,14 @@ Other flows still depend on the gateway WebSocket and control plane APIs.

## OpenClaw Integration Notes

There are two separate OpenClaw integration surfaces in this repo:
OpenClaw currently appears through two concrete access patterns in this repo:

- OpenClaw WebSocket gateway on `:18789`
  - used directly by `backend/services/gateway.py`
  - this is what `start-dev.sh` assumes exists
- optional REST facade over OpenClaw CLI-backed reads
  - useful for typed client access and service-level testing
- selected CLI-backed read helpers behind gateway handlers
  - used for a small set of history/status-style reads
  - not a separate standalone REST service surface

Do not treat those as interchangeable in docs or deployment config. A quick
reachability sketch for the WebSocket surface follows.

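A reachability sketch for the WebSocket surface, in the same python-heredoc style the repo's scripts use — it assumes only that `ws://localhost:18789` accepts connections; the message schema is not specified here, so it just prints the first frame, if any:

```bash
python - <<'PY'
# Requires the third-party `websockets` package (pip install websockets).
import asyncio
import websockets

async def main() -> None:
    async with websockets.connect("ws://localhost:18789") as ws:
        print("connected to OpenClaw gateway on :18789")
        # Message format is unknown here; just show whatever arrives first.
        message = await asyncio.wait_for(ws.recv(), timeout=5)
        print("first message:", message)

asyncio.run(main())
PY
```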
@@ -256,8 +239,6 @@ backend.apps.news_service
backend.gateway_server
  └─ Gateway subprocess entry point (run-scoped)

backend.main
  └─ standalone Gateway entry point (compatibility)
```

## Migration Boundaries

@@ -279,14 +260,14 @@ Also note the remaining naming split:

- Pipeline logic lives in the Gateway process
- Gateway is spawned as a subprocess by runtime_service
- Standalone mode (`backend.main`) preserved for compatibility
- Historical direct gateway startup has been removed from the repository

### Target State

- Pipeline stages become independent services
- Gateway becomes a thin event router
- runtime_service becomes the full orchestrator
- Standalone mode deprecated and removed
- Keep converging on the split-service startup model

See [docs/development-roadmap.md](../docs/development-roadmap.md) for detailed
phase planning.

170
start-dev.sh
170
start-dev.sh
@@ -5,22 +5,16 @@
#
# Startup mode notes:
# -------------------
# This script supports two startup modes:
#
# 1. Microservice mode (default) - starts 4 independent services + Gateway
#    This is the recommended dev mode; each service runs independently, making individual debugging and restarts easy
# This script only supports the current primary development path:
# it starts 4 independent services + a Gateway managed by runtime_service
#    - agent_service   (port 8000): agent lifecycle management
#    - runtime_service (port 8003): runtime configuration and pipeline execution
#    - trading_service (port 8001): market data and trading operations
#    - news_service    (port 8002): news collection and enrichment
#    - gateway         (port 8765): WebSocket gateway, the frontend connection entry point
#
# 2. Standalone mode (--standalone) - starts only the Gateway
#    The Gateway manages services internally; suitable for quick validation or resource-constrained environments
#    - gateway (port 8765): WebSocket gateway spawned by runtime_service
#
# Usage:
#   ./start-dev.sh               # start in microservice mode
#   ./start-dev.sh --standalone  # start in standalone mode
#   ./start-dev.sh               # start the dev environment
#   ./start-dev.sh --help        # show help
#

@@ -51,9 +45,6 @@ readonly NC='\033[0m' # No Color
# Process ID array
PIDS=()

# Startup mode: "microservices" or "standalone"
MODE="microservices"

# ============================================
# Utility functions
# ============================================
@@ -90,22 +81,15 @@ show_help() {
  ./start-dev.sh [options]

Options:
  --standalone    start in standalone mode (starts only the Gateway, which manages services internally)
  --help, -h      show this help message

Mode notes:

  Microservice mode (default):
    Starts 4 independent microservices + Gateway; each service runs in its own process for easier individual debugging
  Development mode:
    Starts 4 independent microservices + a managed Gateway; each service runs in its own process for easier individual debugging
    - agent_service:   http://localhost:8000 (agent lifecycle)
    - trading_service: http://localhost:8001 (market data)
    - news_service:    http://localhost:8002 (news service)
    - runtime_service: http://localhost:8003 (runtime management)
    - gateway:         ws://localhost:8765 (WebSocket gateway)

  Standalone mode (--standalone):
    Starts only the Gateway, which manages services internally
    Suitable for quick validation or resource-constrained environments
    - gateway: ws://localhost:8765 (managed by runtime_service)

Requirements:
  - Python 3.9+
@@ -113,8 +97,7 @@ show_help() {
  - .env file (optional but recommended)

Examples:
  ./start-dev.sh               # start in microservice mode
  ./start-dev.sh --standalone  # start in standalone mode
  ./start-dev.sh               # start the dev environment
EOF
}

@@ -125,10 +108,6 @@ EOF
parse_args() {
    while [[ $# -gt 0 ]]; do
        case "$1" in
            --standalone)
                MODE="standalone"
                shift
                ;;
            --help|-h)
                show_help
                exit 0
@@ -249,13 +228,7 @@ check_env_file() {
check_ports() {
    log_step "Checking for port conflicts..."

    local ports=()

    if [[ "$MODE" == "microservices" ]]; then
        ports=($AGENT_SERVICE_PORT $TRADING_SERVICE_PORT $NEWS_SERVICE_PORT $RUNTIME_SERVICE_PORT $GATEWAY_PORT)
    else
        ports=($GATEWAY_PORT)
    fi
    local ports=($AGENT_SERVICE_PORT $TRADING_SERVICE_PORT $NEWS_SERVICE_PORT $RUNTIME_SERVICE_PORT $GATEWAY_PORT)

    local occupied=()
    for port in "${ports[@]}"; do
@@ -340,16 +313,88 @@ start_service() {
    PIDS+=($!)
}

start_gateway() {
    log_step "Starting Gateway (WebSocket service)..."
    log_info "Gateway will start as a subprocess (port ${GATEWAY_PORT})"
    log_info "Frontend connection address: ws://localhost:${GATEWAY_PORT}"
wait_for_runtime_service() {
    log_step "Waiting for runtime_service to become ready..."

    SERVICE_NAME="gateway" python -m backend.main \
        --mode live \
        --host 0.0.0.0 \
        --port "$GATEWAY_PORT" &
    PIDS+=($!)
    local runtime_url="http://127.0.0.1:${RUNTIME_SERVICE_PORT}/health"
    local attempts=30

    for ((i=1; i<=attempts; i++)); do
        if python - <<PY >/dev/null 2>&1; then
import urllib.request
with urllib.request.urlopen("${runtime_url}", timeout=1.5) as resp:
    raise SystemExit(0 if resp.status == 200 else 1)
PY
            log_info "runtime_service is ready: ${runtime_url}"
            return 0
        fi
        sleep 1
    done

    log_error "runtime_service did not become ready within the expected time"
    return 1
}

start_managed_runtime() {
    log_step "Creating the default runtime via runtime_service..."

    local runtime_api="http://127.0.0.1:${RUNTIME_SERVICE_PORT}/api/runtime/start"

    if ! python - <<PY; then
import json
import os
import sys
import urllib.request

tickers_env = os.getenv("TICKERS", "")
tickers = [item.strip().upper() for item in tickers_env.split(",") if item.strip()]
if not tickers:
    tickers = ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "META", "TSLA", "AMD", "NFLX", "AVGO", "PLTR", "COIN"]

def _env_int(name: str, default: int) -> int:
    value = os.getenv(name, "").strip()
    return int(value) if value else default

def _env_float(name: str, default: float) -> float:
    value = os.getenv(name, "").strip()
    return float(value) if value else default

payload = {
    "launch_mode": "fresh",
    "tickers": tickers,
    "schedule_mode": os.getenv("SCHEDULE_MODE", "daily").strip() or "daily",
    "interval_minutes": _env_int("INTERVAL_MINUTES", 60),
    "trigger_time": os.getenv("TRIGGER_TIME", "09:30").strip() or "09:30",
    "max_comm_cycles": _env_int("MAX_COMM_CYCLES", 2),
    "initial_cash": _env_float("INITIAL_CASH", 100000.0),
    "margin_requirement": _env_float("MARGIN_REQUIREMENT", 0.0),
    "enable_memory": os.getenv("ENABLE_MEMORY", "").strip().lower() in {"1", "true", "yes", "on"},
    "mode": os.getenv("RUNTIME_MODE", "live").strip() or "live",
    "poll_interval": _env_int("POLL_INTERVAL", 10),
}

data = json.dumps(payload).encode("utf-8")
req = urllib.request.Request(
    "${runtime_api}",
    data=data,
    headers={"Content-Type": "application/json"},
    method="POST",
)

try:
    with urllib.request.urlopen(req, timeout=30) as resp:
        body = json.loads(resp.read().decode("utf-8"))
except Exception as exc:
    print(f"FAILED: {exc}", file=sys.stderr)
    raise

print(json.dumps(body, ensure_ascii=False))
PY
        log_error "Failed to create a runtime via runtime_service"
        return 1
    fi

    log_info "Default runtime created; the Gateway will be managed by runtime_service"
}

# ============================================
@@ -366,7 +411,7 @@ start_microservices_mode() {
    echo -e "  runtime_service:  http://localhost:${RUNTIME_SERVICE_PORT}"
    echo -e "  trading_service:  http://localhost:${TRADING_SERVICE_PORT}"
    echo -e "  news_service:     http://localhost:${NEWS_SERVICE_PORT}"
    echo -e "  gateway:          ws://localhost:${GATEWAY_PORT}"
    echo -e "  gateway:          ws://localhost:${GATEWAY_PORT} (spawned by runtime_service)"
    echo -e "${CYAN}==========================================${NC}"
    echo ""

@@ -390,8 +435,8 @@ start_microservices_mode() {
    start_service "trading_service" "backend.apps.trading_service:app" "$TRADING_SERVICE_PORT"
    start_service "news_service" "backend.apps.news_service:app" "$NEWS_SERVICE_PORT"

    # Start the Gateway (as a subprocess)
    start_gateway
    wait_for_runtime_service
    start_managed_runtime

    echo ""
    log_info "All services started"
@@ -399,30 +444,6 @@ start_microservices_mode() {
    echo ""
}

# ============================================
# Standalone mode startup
# ============================================

start_standalone_mode() {
    log_step "Starting standalone mode..."
    echo ""
    echo -e "${CYAN}==========================================${NC}"
    echo -e "${CYAN}           Standalone Mode                ${NC}"
    echo -e "${CYAN}==========================================${NC}"
    echo -e "  gateway: ws://localhost:${GATEWAY_PORT}"
    echo -e "${CYAN}==========================================${NC}"
    echo ""
    log_info "The Gateway will manage services internally"

    # Start the Gateway (standalone mode)
    start_gateway

    echo ""
    log_info "Gateway started (standalone mode)"
    log_info "Press Ctrl+C to stop the services"
    echo ""
}

# ============================================
# Cleanup and signal handling
# ============================================
@@ -480,12 +501,7 @@ main() {
    echo -e "${GREEN}==========================================${NC}"
    echo ""

    # Start services according to mode
    if [[ "$MODE" == "standalone" ]]; then
        start_standalone_mode
    else
        start_microservices_mode
    fi

    # Wait for all background processes
    wait

13
start.sh
13
start.sh
@@ -6,7 +6,6 @@
# ./start.sh                 # build frontend + start all services in the background (default)
# ./start.sh --no-build      # skip the frontend build
# ./start.sh --no-daemon     # run in the foreground (without nohup)
# ./start.sh --gateway-only  # start only the Gateway (for use with nginx)
# ./start.sh stop            # stop all background services
# ./start.sh status          # show service status
#
@@ -37,14 +36,12 @@ FRONTEND_DIST="${SCRIPT_DIR}/frontend/dist"

DAEMON=true
BUILD_FRONTEND=true
GATEWAY_ONLY=false
ACTION="start"

for arg in "$@"; do
    case "$arg" in
        --no-daemon) DAEMON=false ;;
        --no-build) BUILD_FRONTEND=false ;;
        --gateway-only) GATEWAY_ONLY=true ;;
        stop) ACTION="stop" ;;
        status) ACTION="status" ;;
        *) echo -e "${YELLOW}Ignoring unknown argument: ${arg}${NC}" ;;
@@ -164,7 +161,7 @@ do_status() {
    print_status "runtime_service" 8003
    print_status "frontend" "${FRONTEND_PORT}"
    echo ""
    echo -e "  ${CYAN}ℹ${NC} The Gateway is managed by runtime_service and is triggered by starting a task from the frontend"
    echo -e "  ${CYAN}ℹ${NC} The Gateway is managed by runtime_service; run logs are written to runs/<run_id>/logs/gateway.log"
    echo ""

    if [ -d "${FRONTEND_DIST}" ]; then
@@ -208,12 +205,10 @@ start_single_daemon() {
}

start_daemon() {
    if ! ${GATEWAY_ONLY}; then
        start_single_daemon "agent_service" "backend.apps.agent_service:app" 8000
        start_single_daemon "trading_service" "backend.apps.trading_service:app" 8001
        start_single_daemon "news_service" "backend.apps.news_service:app" 8002
        start_single_daemon "runtime_service" "backend.apps.runtime_service:app" 8003
    fi

    echo -e "  ${GREEN}▶${NC} frontend → http://0.0.0.0:${FRONTEND_PORT}"
    nohup env SERVICE_NAME="frontend" "${PYTHON}" -m uvicorn "backend.apps.frontend_service:app" \
@@ -231,7 +226,8 @@ start_daemon() {
    echo "  PID directory: ${PID_DIR}/"
    echo ""
    echo "  Check status:      ./start.sh status"
    echo "  View logs:         tail -f ${LOG_DIR}/gateway.log"
    echo "  View service logs: tail -f ${LOG_DIR}/runtime_service.log"
    echo "  View run logs:     tail -f runs/<run_id>/logs/gateway.log"
    echo "  Stop services:     ./start.sh stop"
    echo ""
}
@@ -261,12 +257,10 @@ start_single_foreground() {
start_foreground() {
    trap cleanup_foreground EXIT INT TERM

    if ! ${GATEWAY_ONLY}; then
        start_single_foreground "agent_service" "backend.apps.agent_service:app" 8000
        start_single_foreground "trading_service" "backend.apps.trading_service:app" 8001
        start_single_foreground "news_service" "backend.apps.news_service:app" 8002
        start_single_foreground "runtime_service" "backend.apps.runtime_service:app" 8003
    fi

    echo -e "  ${GREEN}▶${NC} frontend → http://0.0.0.0:${FRONTEND_PORT}"
    env SERVICE_NAME="frontend" "${PYTHON}" -m uvicorn "backend.apps.frontend_service:app" \
@@ -326,4 +320,3 @@ case "${ACTION}" in
        exit 1
        ;;
esac