From 2daf5717baedda20484850366d7d9e7fce429dd5 Mon Sep 17 00:00:00 2001 From: cillin Date: Tue, 17 Mar 2026 13:55:14 +0800 Subject: [PATCH] Add per-agent skill workspaces and TraderView management --- backend/agents/agent_workspace.py | 75 ++ backend/agents/prompt_factory.py | 105 +- backend/agents/skill_metadata.py | 79 ++ backend/agents/skills_manager.py | 480 ++++++++- backend/agents/toolkit_factory.py | 16 +- backend/agents/workspace_manager.py | 59 ++ backend/cli.py | 192 +++- backend/main.py | 21 +- backend/services/gateway.py | 914 +++++++++++++++++- backend/skills/__init__.py | 1 + backend/skills/builtin/__init__.py | 1 + .../builtin/fundamental_review/SKILL.md | 27 +- .../builtin/portfolio_decisioning/SKILL.md | 27 +- backend/skills/builtin/risk_review/SKILL.md | 27 +- .../skills/builtin/sentiment_review/SKILL.md | 27 +- .../skills/builtin/technical_review/SKILL.md | 27 +- .../skills/builtin/valuation_review/SKILL.md | 36 +- .../builtin/valuation_review/__init__.py | 1 + .../valuation_review/scripts/__init__.py | 1 + .../valuation_review/scripts/dcf_report.py | 71 ++ .../scripts/multiple_valuation_report.py | 115 +++ .../scripts/owner_earnings_report.py | 76 ++ .../customized/portfolio_decisioning/SKILL.md | 21 + .../skills/customized/risk_review/SKILL.md | 21 + .../customized/sentiment_review/SKILL.md | 21 + .../customized/technical_review/SKILL.md | 21 + .../customized/valuation_review/SKILL.md | 21 + backend/tests/test_agent_workspace.py | 191 ++++ .../tests/test_gateway_explain_handlers.py | 338 +++++++ backend/tests/test_skills_cli.py | 72 ++ backend/tests/test_valuation_scripts.py | 106 ++ backend/tools/analysis_tools.py | 242 ++--- frontend/src/App.jsx | 661 ++++++++++++- .../src/components/RuntimeSettingsPanel.jsx | 247 +++++ frontend/src/components/TraderView.jsx | 765 +++++++++++++++ 35 files changed, 4774 insertions(+), 331 deletions(-) create mode 100644 backend/agents/agent_workspace.py create mode 100644 backend/agents/skill_metadata.py create 
mode 100644 backend/skills/__init__.py create mode 100644 backend/skills/builtin/__init__.py create mode 100644 backend/skills/builtin/valuation_review/__init__.py create mode 100644 backend/skills/builtin/valuation_review/scripts/__init__.py create mode 100644 backend/skills/builtin/valuation_review/scripts/dcf_report.py create mode 100644 backend/skills/builtin/valuation_review/scripts/multiple_valuation_report.py create mode 100644 backend/skills/builtin/valuation_review/scripts/owner_earnings_report.py create mode 100644 backend/skills/customized/portfolio_decisioning/SKILL.md create mode 100644 backend/skills/customized/risk_review/SKILL.md create mode 100644 backend/skills/customized/sentiment_review/SKILL.md create mode 100644 backend/skills/customized/technical_review/SKILL.md create mode 100644 backend/skills/customized/valuation_review/SKILL.md create mode 100644 backend/tests/test_agent_workspace.py create mode 100644 backend/tests/test_skills_cli.py create mode 100644 backend/tests/test_valuation_scripts.py create mode 100644 frontend/src/components/RuntimeSettingsPanel.jsx create mode 100644 frontend/src/components/TraderView.jsx diff --git a/backend/agents/agent_workspace.py b/backend/agents/agent_workspace.py new file mode 100644 index 0000000..c9cd23d --- /dev/null +++ b/backend/agents/agent_workspace.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +"""Per-agent run-scoped workspace configuration helpers.""" + +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, List, Optional + +import yaml + + +@dataclass(frozen=True) +class AgentWorkspaceConfig: + """Structured agent config loaded from runs//agents//agent.yaml.""" + + values: Dict[str, Any] = field(default_factory=dict) + + @property + def prompt_files(self) -> Optional[List[str]]: + raw = self.values.get("prompt_files") + if not isinstance(raw, list): + return None + files = [ + str(item).strip() + for item in raw + if isinstance(item, str) and 
str(item).strip() + ] + return files or None + + @property + def enabled_skills(self) -> List[str]: + return _normalized_string_list(self.values.get("enabled_skills")) + + @property + def disabled_skills(self) -> List[str]: + return _normalized_string_list(self.values.get("disabled_skills")) + + @property + def active_tool_groups(self) -> Optional[List[str]]: + groups = _normalized_string_list(self.values.get("active_tool_groups")) + return groups or None + + @property + def disabled_tool_groups(self) -> List[str]: + return _normalized_string_list(self.values.get("disabled_tool_groups")) + + def get(self, key: str, default: Any = None) -> Any: + return self.values.get(key, default) + + +def _normalized_string_list(raw: Any) -> List[str]: + if not isinstance(raw, list): + return [] + seen: List[str] = [] + for item in raw: + if not isinstance(item, str): + continue + value = item.strip() + if value and value not in seen: + seen.append(value) + return seen + + +def load_agent_workspace_config(path: Path) -> AgentWorkspaceConfig: + """Load agent.yaml if present.""" + if not path.exists() or not path.is_file(): + return AgentWorkspaceConfig() + + raw = path.read_text(encoding="utf-8").strip() + if not raw: + return AgentWorkspaceConfig() + + parsed = yaml.safe_load(raw) or {} + if not isinstance(parsed, dict): + parsed = {} + return AgentWorkspaceConfig(values=parsed) diff --git a/backend/agents/prompt_factory.py b/backend/agents/prompt_factory.py index 142b2fd..187b83f 100644 --- a/backend/agents/prompt_factory.py +++ b/backend/agents/prompt_factory.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Any, Optional +from .agent_workspace import load_agent_workspace_config from backend.config.bootstrap_config import get_bootstrap_config_for_run from .prompt_loader import PromptLoader from .skills_manager import SkillsManager @@ -23,6 +24,26 @@ def _append_section(parts: list[str], title: str, content: str) -> None: parts.append(f"## {title}\n{content}") +def 
_build_skill_metadata_summary(skills_manager: SkillsManager, config_name: str, agent_id: str) -> str: + """Create a compact summary of active skills for prompt routing.""" + metadata_items = skills_manager.list_active_skill_metadata(config_name, agent_id) + if not metadata_items: + return "" + + lines: list[str] = [ + "You can use the following active skills. Prefer the most relevant one, then read its SKILL.md if needed for detailed workflow:", + ] + for item in metadata_items: + parts = [f"- `{item.skill_name}`"] + if item.description: + parts.append(item.description) + if item.version: + parts.append(f"version: {item.version}") + parts.append(f"path: {item.path}") + lines.append(" | ".join(parts)) + return "\n".join(lines) + + def build_agent_system_prompt( agent_id: str, config_name: str, @@ -31,6 +52,13 @@ def build_agent_system_prompt( ) -> str: """Build the final system prompt for an agent.""" sections: list[str] = [] + canonical_agent_id = ( + "portfolio_manager" + if "portfolio" in agent_id + else "risk_manager" + if "risk" in agent_id and not analyst_type + else agent_id + ) if analyst_type: personas_config = _prompt_loader.load_yaml_config( @@ -56,11 +84,21 @@ def build_agent_system_prompt( "portfolio_manager", "system", ) + elif canonical_agent_id == "portfolio_manager": + base_prompt = _prompt_loader.load_prompt( + "portfolio_manager", + "system", + ) elif agent_id == "risk_manager": base_prompt = _prompt_loader.load_prompt( "risk_manager", "system", ) + elif canonical_agent_id == "risk_manager": + base_prompt = _prompt_loader.load_prompt( + "risk_manager", + "system", + ) else: raise ValueError(f"Unsupported agent prompt build for: {agent_id}") @@ -69,6 +107,7 @@ def build_agent_system_prompt( skills_manager = SkillsManager() asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id) asset_dir.mkdir(parents=True, exist_ok=True) + agent_config = load_agent_workspace_config(asset_dir / "agent.yaml") bootstrap_config = 
get_bootstrap_config_for_run( skills_manager.project_root, config_name, @@ -80,26 +119,62 @@ def build_agent_system_prompt( bootstrap_config.prompt_body, ) - _append_section( - sections, - "Role", - _read_file_if_exists(asset_dir / "ROLE.md"), - ) - _append_section( - sections, - "Style", - _read_file_if_exists(asset_dir / "STYLE.md"), - ) - _append_section( - sections, - "Policy", - _read_file_if_exists(asset_dir / "POLICY.md"), - ) + prompt_files = agent_config.prompt_files or [ + "SOUL.md", + "PROFILE.md", + "AGENTS.md", + "POLICY.md", + "MEMORY.md", + ] + included_files = set(prompt_files) + title_map = { + "SOUL.md": "Soul", + "PROFILE.md": "Profile", + "AGENTS.md": "Agent Guide", + "POLICY.md": "Policy", + "MEMORY.md": "Memory", + "HEARTBEAT.md": "Heartbeat", + "ROLE.md": "Role", + "STYLE.md": "Style", + } + for filename in prompt_files: + _append_section( + sections, + title_map.get(filename, filename), + _read_file_if_exists(asset_dir / filename), + ) + + if "ROLE.md" not in included_files: + _append_section( + sections, + "Role", + _read_file_if_exists(asset_dir / "ROLE.md"), + ) + if "STYLE.md" not in included_files: + _append_section( + sections, + "Style", + _read_file_if_exists(asset_dir / "STYLE.md"), + ) + if "POLICY.md" not in included_files: + _append_section( + sections, + "Policy", + _read_file_if_exists(asset_dir / "POLICY.md"), + ) skill_prompt = toolkit.get_agent_skill_prompt() if skill_prompt: _append_section(sections, "Skills", str(skill_prompt)) + metadata_summary = _build_skill_metadata_summary( + skills_manager=skills_manager, + config_name=config_name, + agent_id=agent_id, + ) + if metadata_summary: + _append_section(sections, "Active Skill Catalog", metadata_summary) + activated_notes = toolkit.get_activated_notes() if activated_notes: _append_section(sections, "Tool Usage Notes", str(activated_notes)) diff --git a/backend/agents/skill_metadata.py b/backend/agents/skill_metadata.py new file mode 100644 index 0000000..b5fda7b --- 
/dev/null +++ b/backend/agents/skill_metadata.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +"""Skill metadata parsing helpers for SKILL.md files.""" + +from dataclasses import dataclass, field +from pathlib import Path +from typing import List + +import yaml + + +@dataclass(frozen=True) +class SkillMetadata: + """Parsed metadata for a skill package.""" + + skill_name: str + path: Path + source: str + name: str + description: str + version: str = "" + tools: List[str] = field(default_factory=list) + + +def parse_skill_metadata(skill_dir: Path, source: str) -> SkillMetadata: + """Parse SKILL.md frontmatter with a forgiving schema.""" + skill_name = skill_dir.name + skill_file = skill_dir / "SKILL.md" + if not skill_file.exists(): + return SkillMetadata( + skill_name=skill_name, + path=skill_dir, + source=source, + name=skill_name, + description="", + ) + + raw = skill_file.read_text(encoding="utf-8").strip() + frontmatter = {} + body = raw + if raw.startswith("---"): + parts = raw.split("---", 2) + if len(parts) >= 3: + try: + frontmatter = yaml.safe_load(parts[1].strip()) or {} + except yaml.YAMLError: + frontmatter = {} + body = parts[2].strip() + if not isinstance(frontmatter, dict): + frontmatter = {} + + description = str(frontmatter.get("description") or "").strip() + if not description and body: + description = body.splitlines()[0].strip().lstrip("#").strip() + + return SkillMetadata( + skill_name=skill_name, + path=skill_dir, + source=source, + name=str(frontmatter.get("name") or skill_name).strip() or skill_name, + description=description, + version=str(frontmatter.get("version") or "").strip(), + tools=_string_list(frontmatter.get("tools")), + ) + + +def _string_list(value) -> List[str]: + if isinstance(value, str): + item = value.strip() + return [item] if item else [] + if not isinstance(value, list): + return [] + seen: List[str] = [] + for item in value: + if not isinstance(item, str): + continue + normalized = item.strip() + if normalized and 
normalized not in seen: + seen.append(normalized) + return seen diff --git a/backend/agents/skills_manager.py b/backend/agents/skills_manager.py index 42aac22..4e522a8 100644 --- a/backend/agents/skills_manager.py +++ b/backend/agents/skills_manager.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -"""Manage builtin/customized/active skill directories for each run.""" +"""Manage agent-installed and run-active skill directories for each run.""" from pathlib import Path import shutil @@ -7,6 +7,8 @@ from typing import Dict, Iterable, List import yaml +from backend.agents.agent_workspace import load_agent_workspace_config +from backend.agents.skill_metadata import SkillMetadata, parse_skill_metadata from backend.config.bootstrap_config import get_bootstrap_config_for_run @@ -26,12 +28,283 @@ class SkillsManager: def get_active_root(self, config_name: str) -> Path: return self.runs_root / config_name / "skills" / "active" + def get_agent_skills_root(self, config_name: str, agent_id: str) -> Path: + return self.get_agent_asset_dir(config_name, agent_id) / "skills" + + def get_agent_active_root(self, config_name: str, agent_id: str) -> Path: + return self.get_agent_skills_root(config_name, agent_id) / "active" + + def get_agent_installed_root(self, config_name: str, agent_id: str) -> Path: + return self.get_agent_skills_root(config_name, agent_id) / "installed" + + def get_agent_disabled_root(self, config_name: str, agent_id: str) -> Path: + return self.get_agent_skills_root(config_name, agent_id) / "disabled" + + def get_agent_local_root(self, config_name: str, agent_id: str) -> Path: + return self.get_agent_skills_root(config_name, agent_id) / "local" + def get_activation_manifest_path(self, config_name: str) -> Path: return self.runs_root / config_name / "skills" / "activation.yaml" def get_agent_asset_dir(self, config_name: str, agent_id: str) -> Path: return self.runs_root / config_name / "agents" / agent_id + def list_skill_catalog(self) -> List[SkillMetadata]: + 
"""Return builtin/customized skills with parsed metadata.""" + catalog: Dict[str, SkillMetadata] = {} + + for source, root in ( + ("builtin", self.builtin_root), + ("customized", self.customized_root), + ): + if not root.exists(): + continue + for skill_dir in sorted(root.iterdir(), key=lambda item: item.name): + if not skill_dir.is_dir(): + continue + if not (skill_dir / "SKILL.md").exists(): + continue + metadata = parse_skill_metadata(skill_dir, source=source) + catalog[metadata.skill_name] = metadata + + return sorted(catalog.values(), key=lambda item: item.skill_name) + + def list_agent_skill_catalog( + self, + config_name: str, + agent_id: str, + ) -> List[SkillMetadata]: + """Return shared plus agent-local skills for one agent.""" + catalog = { + item.skill_name: item + for item in self.list_skill_catalog() + } + for item in self.list_agent_local_skills(config_name, agent_id): + catalog[item.skill_name] = item + return sorted(catalog.values(), key=lambda item: item.skill_name) + + def list_active_skill_metadata( + self, + config_name: str, + agent_id: str, + ) -> List[SkillMetadata]: + """Return metadata for active skills synced for one agent.""" + active_root = self.get_agent_active_root(config_name, agent_id) + if not active_root.exists(): + return [] + + items: List[SkillMetadata] = [] + for skill_dir in sorted(active_root.iterdir(), key=lambda item: item.name): + if not skill_dir.is_dir(): + continue + if not (skill_dir / "SKILL.md").exists(): + continue + items.append(parse_skill_metadata(skill_dir, source="active")) + return items + + def list_agent_local_skills( + self, + config_name: str, + agent_id: str, + ) -> List[SkillMetadata]: + """Return metadata for agent-private local skills.""" + local_root = self.get_agent_local_root(config_name, agent_id) + if not local_root.exists(): + return [] + + items: List[SkillMetadata] = [] + for skill_dir in sorted(local_root.iterdir(), key=lambda item: item.name): + if not skill_dir.is_dir(): + continue + if not 
(skill_dir / "SKILL.md").exists(): + continue + items.append(parse_skill_metadata(skill_dir, source="local")) + return items + + def load_skill_document(self, skill_name: str) -> Dict[str, object]: + """Return skill metadata plus markdown body for one skill.""" + source_dir = self._resolve_source_dir(skill_name) + return self._load_skill_document_from_dir( + source_dir, + source="customized" if source_dir.parent == self.customized_root else "builtin", + ) + + def load_agent_skill_document( + self, + config_name: str, + agent_id: str, + skill_name: str, + ) -> Dict[str, object]: + """Return skill metadata plus markdown body for one agent-visible skill.""" + source_dir = self._resolve_agent_skill_source_dir( + config_name=config_name, + agent_id=agent_id, + skill_name=skill_name, + ) + source = "local" + if source_dir.parent == self.customized_root: + source = "customized" + elif source_dir.parent == self.builtin_root: + source = "builtin" + elif source_dir.parent == self.get_agent_installed_root(config_name, agent_id): + source = "installed" + return self._load_skill_document_from_dir(source_dir, source=source) + + def create_agent_local_skill( + self, + config_name: str, + agent_id: str, + skill_name: str, + ) -> Path: + """Create a new local skill directory with a default SKILL.md.""" + normalized = _normalize_skill_name(skill_name) + if not normalized: + raise ValueError("Skill name is required.") + local_root = self.get_agent_local_root(config_name, agent_id) + local_root.mkdir(parents=True, exist_ok=True) + skill_dir = local_root / normalized + if skill_dir.exists(): + raise FileExistsError(f"Local skill already exists: {normalized}") + skill_dir.mkdir(parents=True, exist_ok=False) + (skill_dir / "SKILL.md").write_text( + "---\n" + f"name: {normalized}\n" + "description: 当用户提出与该本地技能相关的专门任务时,应使用此技能。\n" + "version: 1.0.0\n" + "---\n\n" + f"# {normalized}\n\n" + "在这里描述该交易员的专有分析流程、判断框架和可复用步骤。\n", + encoding="utf-8", + ) + return skill_dir + + def 
update_agent_local_skill( + self, + config_name: str, + agent_id: str, + skill_name: str, + content: str, + ) -> Path: + """Overwrite one agent-local SKILL.md.""" + normalized = _normalize_skill_name(skill_name) + if not normalized: + raise ValueError("Skill name is required.") + skill_dir = self.get_agent_local_root(config_name, agent_id) / normalized + if not skill_dir.exists(): + raise FileNotFoundError(f"Unknown local skill: {normalized}") + (skill_dir / "SKILL.md").write_text(content, encoding="utf-8") + return skill_dir + + def delete_agent_local_skill( + self, + config_name: str, + agent_id: str, + skill_name: str, + ) -> None: + """Delete one agent-local skill directory.""" + normalized = _normalize_skill_name(skill_name) + if not normalized: + raise ValueError("Skill name is required.") + skill_dir = self.get_agent_local_root(config_name, agent_id) / normalized + if not skill_dir.exists(): + raise FileNotFoundError(f"Unknown local skill: {normalized}") + shutil.rmtree(skill_dir) + + def _load_skill_document_from_dir( + self, + source_dir: Path, + *, + source: str, + ) -> Dict[str, object]: + """Return metadata plus markdown body for one resolved skill directory.""" + metadata = parse_skill_metadata( + source_dir, + source=source, + ) + skill_file = source_dir / "SKILL.md" + raw = skill_file.read_text(encoding="utf-8").strip() if skill_file.exists() else "" + body = raw + if raw.startswith("---"): + parts = raw.split("---", 2) + if len(parts) >= 3: + body = parts[2].strip() + + return { + "skill_name": metadata.skill_name, + "name": metadata.name, + "description": metadata.description, + "version": metadata.version, + "tools": metadata.tools, + "source": metadata.source, + "content": body, + } + + def update_agent_skill_overrides( + self, + config_name: str, + agent_id: str, + *, + enable: Iterable[str] | None = None, + disable: Iterable[str] | None = None, + ) -> Dict[str, List[str]]: + """Persist per-agent enabled/disabled skill overrides in 
agent.yaml.""" + asset_dir = self.get_agent_asset_dir(config_name, agent_id) + asset_dir.mkdir(parents=True, exist_ok=True) + config_path = asset_dir / "agent.yaml" + current = load_agent_workspace_config(config_path) + values = dict(current.values) + + enabled = _dedupe_preserve_order(current.enabled_skills) + disabled_set = set(current.disabled_skills) + + for skill_name in enable or []: + if skill_name not in enabled: + enabled.append(skill_name) + disabled_set.discard(skill_name) + + for skill_name in disable or []: + disabled_set.add(skill_name) + enabled = [item for item in enabled if item != skill_name] + + values["enabled_skills"] = enabled + values["disabled_skills"] = sorted(disabled_set) + config_path.write_text( + yaml.safe_dump(values, allow_unicode=True, sort_keys=False), + encoding="utf-8", + ) + return { + "enabled_skills": enabled, + "disabled_skills": sorted(disabled_set), + } + + def forget_agent_skill_overrides( + self, + config_name: str, + agent_id: str, + skill_names: Iterable[str], + ) -> Dict[str, List[str]]: + """Remove skills from both enabled/disabled overrides in agent.yaml.""" + asset_dir = self.get_agent_asset_dir(config_name, agent_id) + asset_dir.mkdir(parents=True, exist_ok=True) + config_path = asset_dir / "agent.yaml" + current = load_agent_workspace_config(config_path) + values = dict(current.values) + removed = set(skill_names) + + enabled = [item for item in current.enabled_skills if item not in removed] + disabled = [item for item in current.disabled_skills if item not in removed] + + values["enabled_skills"] = enabled + values["disabled_skills"] = disabled + config_path.write_text( + yaml.safe_dump(values, allow_unicode=True, sort_keys=False), + encoding="utf-8", + ) + return { + "enabled_skills": enabled, + "disabled_skills": disabled, + } + def ensure_activation_manifest(self, config_name: str) -> Path: manifest_path = self.get_activation_manifest_path(config_name) manifest_path.parent.mkdir(parents=True, exist_ok=True) @@ 
-62,6 +335,34 @@ class SkillsManager: raise FileNotFoundError(f"Unknown skill: {skill_name}") + def _resolve_agent_skill_source_dir( + self, + config_name: str, + agent_id: str, + skill_name: str, + ) -> Path: + """Resolve one skill from the agent-local workspace or shared registry.""" + for root in ( + self.get_agent_local_root(config_name, agent_id), + self.get_agent_installed_root(config_name, agent_id), + ): + candidate = root / skill_name + if candidate.exists() and (candidate / "SKILL.md").exists(): + return candidate + return self._resolve_source_dir(skill_name) + + def _skill_exists_for_agent( + self, + config_name: str, + agent_id: str, + skill_name: str, + ) -> bool: + try: + self._resolve_agent_skill_source_dir(config_name, agent_id, skill_name) + except FileNotFoundError: + return False + return True + def _persist_runtime_edits( self, config_name: str, @@ -125,6 +426,13 @@ class SkillsManager: bootstrap = get_bootstrap_config_for_run(self.project_root, config_name) override = bootstrap.agent_override(agent_id) skills = list(override.get("skills", list(default_skills))) + agent_config = load_agent_workspace_config( + self.get_agent_asset_dir(config_name, agent_id) / "agent.yaml", + ) + + for skill_name in agent_config.enabled_skills: + if skill_name not in skills: + skills.append(skill_name) manifest = self.load_activation_manifest(config_name) for skill_name in manifest.get("global_enabled_skills", []): @@ -139,51 +447,61 @@ class SkillsManager: disabled.update( manifest.get("agent_disabled_skills", {}).get(agent_id, []), ) + disabled.update(agent_config.disabled_skills) - return [skill for skill in skills if skill not in disabled] + for item in self.list_agent_local_skills(config_name, agent_id): + if item.skill_name not in skills: + skills.append(item.skill_name) - def sync_active_skills( + return [ + skill + for skill in skills + if skill not in disabled + and self._skill_exists_for_agent(config_name, agent_id, skill) + ] + + def sync_skill_dirs( 
self, - config_name: str, - skill_names: Iterable[str], + target_root: Path, + skill_sources: Dict[str, Path], ) -> List[Path]: - """Sync selected skills into the run workspace and return their paths.""" - active_root = self.get_active_root(config_name) - active_root.mkdir(parents=True, exist_ok=True) + """Sync selected skill directories into one target root.""" + target_root.mkdir(parents=True, exist_ok=True) synced_paths: List[Path] = [] - wanted = set(skill_names) + wanted = set(skill_sources) - for existing in active_root.iterdir(): + for existing in target_root.iterdir(): if existing.is_dir() and existing.name not in wanted: - self._persist_runtime_edits( - config_name=config_name, - skill_name=existing.name, - active_dir=existing, - ) shutil.rmtree(existing) - for skill_name in skill_names: - source_dir = self._resolve_source_dir(skill_name) - target_dir = active_root / skill_name + for skill_name, source_dir in skill_sources.items(): + target_dir = target_root / skill_name if target_dir.exists(): - self._persist_runtime_edits( - config_name=config_name, - skill_name=skill_name, - active_dir=target_dir, - ) shutil.rmtree(target_dir) shutil.copytree(source_dir, target_dir) synced_paths.append(target_dir) return synced_paths + def sync_active_skills( + self, + target_root: Path, + skill_names: Iterable[str], + ) -> List[Path]: + """Sync selected shared skills into one active directory.""" + skill_sources = { + skill_name: self._resolve_source_dir(skill_name) + for skill_name in skill_names + } + return self.sync_skill_dirs(target_root, skill_sources) + def prepare_active_skills( self, config_name: str, agent_defaults: Dict[str, Iterable[str]], ) -> Dict[str, List[Path]]: - """Resolve all agent skills, sync the union once, and map paths per agent.""" + """Resolve all agent skills into per-agent installed/active workspaces.""" resolved: Dict[str, List[str]] = {} union: List[str] = [] @@ -198,10 +516,112 @@ class SkillsManager: if skill_name not in union: 
union.append(skill_name) - self.sync_active_skills(config_name=config_name, skill_names=union) - active_root = self.get_active_root(config_name) + # Maintain the legacy union directory for compatibility/debugging. + # Agent-local skills remain private to the agent workspace. + self.sync_active_skills( + target_root=self.get_active_root(config_name), + skill_names=[ + skill_name + for skill_name in union + if self._is_shared_skill(skill_name) + ], + ) - return { - agent_id: [active_root / skill_name for skill_name in skill_names] - for agent_id, skill_names in resolved.items() - } + active_map: Dict[str, List[Path]] = {} + for agent_id, skill_names in resolved.items(): + installed_sources = { + skill_name: self._resolve_source_dir(skill_name) + for skill_name in skill_names + if (self.get_agent_local_root(config_name, agent_id) / skill_name).exists() is False + } + installed_paths = self.sync_skill_dirs( + target_root=self.get_agent_installed_root(config_name, agent_id), + skill_sources=installed_sources, + ) + + local_root = self.get_agent_local_root(config_name, agent_id) + local_sources = { + skill_name: local_root / skill_name + for skill_name in skill_names + if (local_root / skill_name).exists() + } + active_sources = { + path.name: path for path in installed_paths + } + active_sources.update(local_sources) + active_map[agent_id] = self.sync_skill_dirs( + target_root=self.get_agent_active_root(config_name, agent_id), + skill_sources=active_sources, + ) + + disabled_names = _dedupe_preserve_order( + self._resolve_disabled_skill_names( + config_name=config_name, + agent_id=agent_id, + default_skills=agent_defaults.get(agent_id, []), + ), + ) + disabled_sources = { + skill_name: self._resolve_agent_skill_source_dir( + config_name=config_name, + agent_id=agent_id, + skill_name=skill_name, + ) + for skill_name in disabled_names + } + self.sync_skill_dirs( + target_root=self.get_agent_disabled_root(config_name, agent_id), + skill_sources=disabled_sources, + ) + + 
return active_map + + def _is_shared_skill(self, skill_name: str) -> bool: + try: + self._resolve_source_dir(skill_name) + except FileNotFoundError: + return False + return True + + def _resolve_disabled_skill_names( + self, + config_name: str, + agent_id: str, + default_skills: Iterable[str], + ) -> List[str]: + """Resolve explicit disabled skills for one agent.""" + bootstrap = get_bootstrap_config_for_run(self.project_root, config_name) + override = bootstrap.agent_override(agent_id) + baseline = list(override.get("skills", list(default_skills))) + agent_config = load_agent_workspace_config( + self.get_agent_asset_dir(config_name, agent_id) / "agent.yaml", + ) + manifest = self.load_activation_manifest(config_name) + disabled = list(manifest.get("global_disabled_skills", [])) + disabled.extend(manifest.get("agent_disabled_skills", {}).get(agent_id, [])) + disabled.extend(agent_config.disabled_skills) + for skill_name in baseline: + if skill_name in agent_config.disabled_skills and skill_name not in disabled: + disabled.append(skill_name) + for item in self.list_agent_local_skills(config_name, agent_id): + if item.skill_name in agent_config.disabled_skills and item.skill_name not in disabled: + disabled.append(item.skill_name) + return [ + skill + for skill in disabled + if self._skill_exists_for_agent(config_name, agent_id, skill) + ] + + +def _dedupe_preserve_order(items: Iterable[str]) -> List[str]: + result: List[str] = [] + for item in items: + if item not in result: + result.append(item) + return result + + +def _normalize_skill_name(raw_name: str) -> str: + normalized = str(raw_name or "").strip().lower().replace(" ", "_").replace("-", "_") + allowed = [ch for ch in normalized if ch.isalnum() or ch == "_"] + return "".join(allowed).strip("_") diff --git a/backend/agents/toolkit_factory.py b/backend/agents/toolkit_factory.py index 192a9d7..92fbb3d 100644 --- a/backend/agents/toolkit_factory.py +++ b/backend/agents/toolkit_factory.py @@ -3,6 +3,7 @@ from 
typing import Any, Dict, Iterable +from .agent_workspace import load_agent_workspace_config from backend.config.bootstrap_config import get_bootstrap_config_for_run import yaml @@ -151,6 +152,9 @@ def create_agent_toolkit( profiles = load_agent_profiles() profile = profiles.get(agent_id, {}) skills_manager = SkillsManager() + agent_config = load_agent_workspace_config( + skills_manager.get_agent_asset_dir(config_name, agent_id) / "agent.yaml", + ) bootstrap_config = get_bootstrap_config_for_run( skills_manager.project_root, config_name, @@ -158,8 +162,16 @@ def create_agent_toolkit( override = bootstrap_config.agent_override(agent_id) active_groups = override.get( "active_tool_groups", - profile.get("active_tool_groups", []), + agent_config.active_tool_groups + or profile.get("active_tool_groups", []), ) + disabled_groups = set(agent_config.disabled_tool_groups) + if disabled_groups: + active_groups = [ + group_name + for group_name in active_groups + if group_name not in disabled_groups + ] toolkit = Toolkit( agent_skill_instruction=( @@ -184,7 +196,7 @@ def create_agent_toolkit( default_skills=profile.get("skills", []), ) active_skill_dirs = [ - skills_manager.get_active_root(config_name) / skill_name + skills_manager.get_agent_active_root(config_name, agent_id) / skill_name for skill_name in skill_names ] diff --git a/backend/agents/workspace_manager.py b/backend/agents/workspace_manager.py index c3582ea..c2775e9 100644 --- a/backend/agents/workspace_manager.py +++ b/backend/agents/workspace_manager.py @@ -4,6 +4,8 @@ from pathlib import Path from typing import Dict, Iterable, Optional +import yaml + from .skills_manager import SkillsManager @@ -59,6 +61,10 @@ class WorkspaceManager: agent_id, ) asset_dir.mkdir(parents=True, exist_ok=True) + (asset_dir / "skills" / "installed").mkdir(parents=True, exist_ok=True) + (asset_dir / "skills" / "active").mkdir(parents=True, exist_ok=True) + (asset_dir / "skills" / "disabled").mkdir(parents=True, exist_ok=True) + 
(asset_dir / "skills" / "local").mkdir(parents=True, exist_ok=True) self._ensure_file( asset_dir / "ROLE.md", @@ -81,6 +87,35 @@ class WorkspaceManager: f"{policy_seed}".strip() + "\n", ) + self._ensure_file( + asset_dir / "SOUL.md", + "# Soul\n\n" + "Describe the agent's temperament, reasoning posture, and voice.\n\n", + ) + self._ensure_file( + asset_dir / "PROFILE.md", + "# Profile\n\n" + "Track this agent's long-lived investment style, preferences, and strengths.\n\n", + ) + self._ensure_file( + asset_dir / "AGENTS.md", + "# Agent Guide\n\n" + "Document how this agent should work, collaborate, and choose tools or skills.\n\n", + ) + self._ensure_file( + asset_dir / "MEMORY.md", + "# Memory\n\n" + "Store durable lessons, heuristics, and reminders for this agent.\n\n", + ) + self._ensure_file( + asset_dir / "HEARTBEAT.md", + "# Heartbeat\n\n" + "Optional checklist for periodic review or self-reflection.\n\n", + ) + self._ensure_agent_yaml( + asset_dir / "agent.yaml", + agent_id=agent_id, + ) return asset_dir def initialize_default_assets( @@ -138,3 +173,27 @@ class WorkspaceManager: def _ensure_file(path: Path, content: str) -> None: if not path.exists(): path.write_text(content, encoding="utf-8") + + @staticmethod + def _ensure_agent_yaml(path: Path, agent_id: str) -> None: + if path.exists(): + return + + payload = { + "agent_id": agent_id, + "prompt_files": [ + "SOUL.md", + "PROFILE.md", + "AGENTS.md", + "POLICY.md", + "MEMORY.md", + ], + "enabled_skills": [], + "disabled_skills": [], + "active_tool_groups": [], + "disabled_tool_groups": [], + } + path.write_text( + yaml.safe_dump(payload, allow_unicode=True, sort_keys=False), + encoding="utf-8", + ) diff --git a/backend/cli.py b/backend/cli.py index a12d543..be23d09 100644 --- a/backend/cli.py +++ b/backend/cli.py @@ -24,7 +24,9 @@ from rich.prompt import Confirm from rich.table import Table from dotenv import load_dotenv +from backend.agents.agent_workspace import load_agent_workspace_config from 
backend.agents.prompt_loader import PromptLoader +from backend.agents.skills_manager import SkillsManager from backend.agents.workspace_manager import WorkspaceManager from backend.data.market_ingest import ingest_symbols from backend.data.market_store import MarketStore @@ -38,12 +40,21 @@ app = typer.Typer( ) ingest_app = typer.Typer(help="Ingest Polygon market data into the research warehouse.") app.add_typer(ingest_app, name="ingest") +skills_app = typer.Typer(help="Inspect and manage per-agent skills.") +app.add_typer(skills_app, name="skills") console = Console() _prompt_loader = PromptLoader() load_dotenv() +def _normalize_typer_value(value, default): + """Allow CLI command functions to be called directly in tests/internal code.""" + if hasattr(value, "default"): + return value.default + return default if value is None else value + + def get_project_root() -> Path: """Get the project root directory.""" # Assuming cli.py is in backend/ @@ -213,6 +224,19 @@ def initialize_workspace(config_name: str) -> Path: return workspace_manager.get_run_dir(config_name) +def _require_agent_asset_dir(config_name: str, agent_id: str) -> Path: + manager = WorkspaceManager(project_root=get_project_root()) + manager.initialize_default_assets( + config_name=config_name, + agent_ids=[agent_id], + analyst_personas=_prompt_loader.load_yaml_config( + "analyst", + "personas", + ), + ) + return manager.skills_manager.get_agent_asset_dir(config_name, agent_id) + + def _resolve_symbols(raw_tickers: Optional[str], config_name: Optional[str] = None) -> list[str]: """Resolve symbols from explicit input or runtime bootstrap config.""" if raw_tickers and raw_tickers.strip(): @@ -622,6 +646,137 @@ def ingest_report( console.print(table) +@skills_app.command("list") +def skills_list( + config_name: str = typer.Option( + "default", + "--config-name", + "-c", + help="Run config name.", + ), + agent_id: Optional[str] = typer.Option( + None, + "--agent-id", + "-a", + help="Optional agent id to 
show resolved status for.", + ), +): + """List available skills and optional agent-level enablement state.""" + project_root = get_project_root() + skills_manager = SkillsManager(project_root=project_root) + catalog = ( + skills_manager.list_agent_skill_catalog(config_name, agent_id) + if agent_id + else skills_manager.list_skill_catalog() + ) + if not catalog: + console.print("[yellow]No skills found[/yellow]") + raise typer.Exit(0) + + agent_config = None + resolved_skills = set() + if agent_id: + asset_dir = _require_agent_asset_dir(config_name, agent_id) + agent_config = load_agent_workspace_config(asset_dir / "agent.yaml") + resolved_skills = set( + skills_manager.resolve_agent_skill_names( + config_name=config_name, + agent_id=agent_id, + default_skills=[], + ), + ) + + table = Table(title="Skill Catalog") + table.add_column("Skill", style="cyan") + table.add_column("Source") + table.add_column("Description") + if agent_id: + table.add_column("Status") + + enabled = set(agent_config.enabled_skills) if agent_config else set() + disabled = set(agent_config.disabled_skills) if agent_config else set() + for skill in catalog: + row = [ + skill.skill_name, + skill.source, + skill.description or "-", + ] + if agent_id: + if skill.skill_name in disabled: + status = "disabled" + elif skill.skill_name in enabled: + status = "enabled" + elif skill.skill_name in resolved_skills: + status = "active" + else: + status = "-" + row.append(status) + table.add_row(*row) + console.print(table) + + +@skills_app.command("enable") +def skills_enable( + agent_id: str = typer.Option(..., "--agent-id", "-a", help="Agent id."), + skill: str = typer.Option(..., "--skill", "-s", help="Skill name."), + config_name: str = typer.Option( + "default", + "--config-name", + "-c", + help="Run config name.", + ), +): + """Enable a skill for one agent in agent.yaml.""" + asset_dir = _require_agent_asset_dir(config_name, agent_id) + skills_manager = SkillsManager(project_root=get_project_root()) + 
catalog = { + item.skill_name + for item in skills_manager.list_agent_skill_catalog(config_name, agent_id) + } + if skill not in catalog: + console.print(f"[red]Unknown skill: {skill}[/red]") + raise typer.Exit(1) + + result = skills_manager.update_agent_skill_overrides( + config_name=config_name, + agent_id=agent_id, + enable=[skill], + ) + console.print( + f"[green]Enabled[/green] `{skill}` for `{agent_id}` " + f"([{asset_dir / 'agent.yaml'}])", + ) + console.print(f"Enabled skills: {', '.join(result['enabled_skills']) or '-'}") + console.print(f"Disabled skills: {', '.join(result['disabled_skills']) or '-'}") + + +@skills_app.command("disable") +def skills_disable( + agent_id: str = typer.Option(..., "--agent-id", "-a", help="Agent id."), + skill: str = typer.Option(..., "--skill", "-s", help="Skill name."), + config_name: str = typer.Option( + "default", + "--config-name", + "-c", + help="Run config name.", + ), +): + """Disable a skill for one agent in agent.yaml.""" + asset_dir = _require_agent_asset_dir(config_name, agent_id) + skills_manager = SkillsManager(project_root=get_project_root()) + result = skills_manager.update_agent_skill_overrides( + config_name=config_name, + agent_id=agent_id, + disable=[skill], + ) + console.print( + f"[yellow]Disabled[/yellow] `{skill}` for `{agent_id}` " + f"([{asset_dir / 'agent.yaml'}])", + ) + console.print(f"Enabled skills: {', '.join(result['enabled_skills']) or '-'}") + console.print(f"Disabled skills: {', '.join(result['disabled_skills']) or '-'}") + + @app.command() def backtest( start: Optional[str] = typer.Option( @@ -684,6 +839,7 @@ def backtest( border_style="cyan", ), ) + poll_interval = int(_normalize_typer_value(poll_interval, 10)) # Validate dates - required for backtest if not start or not end: @@ -801,12 +957,22 @@ def live( "-p", help="WebSocket server port", ), + schedule_mode: str = typer.Option( + "daily", + "--schedule-mode", + help="Scheduler mode: 'daily' or 'intraday'", + ), trigger_time: str = 
typer.Option( "now", "--trigger-time", "-t", help="Trigger time in LOCAL timezone (HH:MM), or 'now' to run immediately", ), + interval_minutes: int = typer.Option( + 60, + "--interval-minutes", + help="When schedule-mode=intraday, run every N minutes", + ), poll_interval: int = typer.Option( 10, "--poll-interval", @@ -830,9 +996,12 @@ def live( evotraders live # Run immediately (default) evotraders live --mock # Mock mode evotraders live -t 22:30 # Run at 22:30 local time daily + evotraders live --schedule-mode intraday --interval-minutes 60 evotraders live --trigger-time now # Run immediately evotraders live --clean # Clear historical data before starting """ + schedule_mode = str(_normalize_typer_value(schedule_mode, "daily")) + interval_minutes = int(_normalize_typer_value(interval_minutes, 60)) mode_name = "MOCK" if mock else "LIVE" console.print( Panel.fit( @@ -864,6 +1033,16 @@ def live( # Handle historical data cleanup handle_history_cleanup(config_name, auto_clean=clean) + if schedule_mode not in {"daily", "intraday"}: + console.print( + f"[red]Error: unsupported schedule mode '{schedule_mode}'[/red]", + ) + raise typer.Exit(1) + + if interval_minutes <= 0: + console.print("[red]Error: --interval-minutes must be > 0[/red]") + raise typer.Exit(1) + # Convert local time to NYSE time nyse_tz = ZoneInfo("America/New_York") local_tz = datetime.now().astimezone().tzinfo @@ -871,7 +1050,9 @@ def live( nyse_now = datetime.now(nyse_tz) # Convert trigger time from local to NYSE - if trigger_time.lower() == "now": + if schedule_mode == "intraday": + nyse_trigger_time = "now" + elif trigger_time.lower() == "now": nyse_trigger_time = "now" else: local_trigger = datetime.strptime(trigger_time, "%H:%M") @@ -891,7 +1072,10 @@ def live( console.print( f" NYSE Time: {nyse_now.strftime('%Y-%m-%d %H:%M:%S %Z')}", ) - if nyse_trigger_time == "now": + console.print(f" Schedule: {schedule_mode}") + if schedule_mode == "intraday": + console.print(f" Interval: every 
{interval_minutes} minute(s)") + elif nyse_trigger_time == "now": console.print(" Trigger: [green]NOW (immediate)[/green]") else: console.print( @@ -951,10 +1135,14 @@ def live( host, "--port", str(port), + "--schedule-mode", + schedule_mode, "--poll-interval", str(poll_interval), "--trigger-time", nyse_trigger_time, + "--interval-minutes", + str(interval_minutes), ] if mock: diff --git a/backend/main.py b/backend/main.py index da0e640..41e8354 100644 --- a/backend/main.py +++ b/backend/main.py @@ -48,6 +48,9 @@ def _resolve_runtime_config(args) -> dict: project_root=project_root, config_name=args.config_name, enable_memory=args.enable_memory, + schedule_mode=args.schedule_mode, + interval_minutes=args.interval_minutes, + trigger_time=args.trigger_time, ) @@ -261,6 +264,7 @@ async def run_with_gateway(args): # Create scheduler callback scheduler_callback = None trading_dates = [] + live_scheduler = None if is_backtest: backtest_scheduler = BacktestScheduler( @@ -276,10 +280,11 @@ async def run_with_gateway(args): scheduler_callback = scheduler_callback_fn else: - # Live mode: use daily scheduler with NYSE timezone + # Live mode: use daily or intraday scheduler with NYSE timezone live_scheduler = Scheduler( - mode="daily", - trigger_time=args.trigger_time, + mode=runtime_config["schedule_mode"], + trigger_time=runtime_config["trigger_time"], + interval_minutes=runtime_config["interval_minutes"], config={"config_name": config_name}, ) @@ -300,11 +305,15 @@ async def run_with_gateway(args): "backtest_mode": is_backtest, "tickers": tickers, "config_name": config_name, + "schedule_mode": runtime_config["schedule_mode"], + "interval_minutes": runtime_config["interval_minutes"], + "trigger_time": runtime_config["trigger_time"], "initial_cash": initial_cash, "margin_requirement": margin_requirement, "max_comm_cycles": runtime_config["max_comm_cycles"], "enable_memory": runtime_config["enable_memory"], }, + scheduler=live_scheduler if not is_backtest else None, ) if 
is_backtest: @@ -325,7 +334,13 @@ def main(): parser.add_argument("--config-name", default="mock") parser.add_argument("--host", default="0.0.0.0") parser.add_argument("--port", type=int, default=8765) + parser.add_argument( + "--schedule-mode", + choices=["daily", "intraday"], + default="daily", + ) parser.add_argument("--trigger-time", default="09:30") # NYSE market open + parser.add_argument("--interval-minutes", type=int, default=60) parser.add_argument("--poll-interval", type=int, default=10) parser.add_argument("--start-date") parser.add_argument("--end-date") diff --git a/backend/services/gateway.py b/backend/services/gateway.py index a6f0299..5c1c54e 100644 --- a/backend/services/gateway.py +++ b/backend/services/gateway.py @@ -13,9 +13,13 @@ import websockets from websockets.asyncio.server import ServerConnection from backend.config.bootstrap_config import ( + get_bootstrap_config_for_run, resolve_runtime_config, update_bootstrap_values_for_run, ) +from backend.agents.agent_workspace import load_agent_workspace_config +from backend.agents.skills_manager import SkillsManager +from backend.agents.toolkit_factory import load_agent_profiles from backend.data.provider_utils import normalize_symbol from backend.data.market_ingest import ingest_symbols from backend.enrich.llm_enricher import llm_enrichment_enabled @@ -23,6 +27,7 @@ from backend.enrich.news_enricher import enrich_news_for_symbol from backend.explain.range_explainer import build_range_explanation from backend.explain.similarity_service import find_similar_days from backend.explain.story_service import get_or_create_stock_story +from backend.llm.models import get_agent_model_info from backend.utils.msg_adapter import FrontendAdapter from backend.utils.terminal_dashboard import get_dashboard from backend.core.pipeline import TradingPipeline @@ -32,8 +37,19 @@ from backend.services.storage import StorageService from backend.data.provider_router import get_provider_router from backend.tools.data_tools 
import get_prices from backend.tools.data_tools import get_company_news +from backend.core.scheduler import Scheduler logger = logging.getLogger(__name__) +EDITABLE_AGENT_WORKSPACE_FILES = { + "SOUL.md", + "PROFILE.md", + "AGENTS.md", + "MEMORY.md", + "POLICY.md", + "HEARTBEAT.md", + "ROLE.md", + "STYLE.md", +} class Gateway: @@ -46,12 +62,14 @@ class Gateway: pipeline: TradingPipeline, state_sync: Optional[StateSync] = None, scheduler_callback: Optional[Callable] = None, + scheduler: Optional[Scheduler] = None, config: Dict[str, Any] = None, ): self.market_service = market_service self.storage = storage_service self.pipeline = pipeline self.scheduler_callback = scheduler_callback + self.scheduler = scheduler self.config = config or {} self.mode = self.config.get("mode", "live") @@ -67,7 +85,9 @@ class Gateway: self.connected_clients: Set[ServerConnection] = set() self.lock = asyncio.Lock() + self._cycle_lock = asyncio.Lock() self._backtest_task: Optional[asyncio.Task] = None + self._manual_cycle_task: Optional[asyncio.Task] = None self._backtest_start_date: Optional[str] = None self._backtest_end_date: Optional[str] = None self._dashboard = get_dashboard() @@ -116,6 +136,9 @@ class Gateway: "runtime_config", { "tickers": self.config.get("tickers", []), + "schedule_mode": self.config.get("schedule_mode", "daily"), + "interval_minutes": self.config.get("interval_minutes", 60), + "trigger_time": self.config.get("trigger_time", "09:30"), "initial_cash": self.config.get( "initial_cash", self.storage.initial_cash, @@ -150,7 +173,9 @@ class Gateway: await self.market_service.start(broadcast_func=self.broadcast) - if self.scheduler_callback: + if self.scheduler: + await self.scheduler.start(self.on_strategy_trigger) + elif self.scheduler_callback: await self.scheduler_callback(callback=self.on_strategy_trigger) # Start market status monitoring (only for live mode) @@ -261,10 +286,34 @@ class Gateway: await self._send_initial_state(websocket) elif msg_type == 
"start_backtest": await self._handle_start_backtest(data) + elif msg_type == "trigger_strategy": + await self._handle_manual_trigger(websocket, data) + elif msg_type == "update_runtime_config": + await self._handle_update_runtime_config(websocket, data) elif msg_type == "reload_runtime_assets": await self._handle_reload_runtime_assets() elif msg_type == "update_watchlist": await self._handle_update_watchlist(websocket, data) + elif msg_type == "get_agent_skills": + await self._handle_get_agent_skills(websocket, data) + elif msg_type == "get_agent_profile": + await self._handle_get_agent_profile(websocket, data) + elif msg_type == "get_skill_detail": + await self._handle_get_skill_detail(websocket, data) + elif msg_type == "create_agent_local_skill": + await self._handle_create_agent_local_skill(websocket, data) + elif msg_type == "update_agent_local_skill": + await self._handle_update_agent_local_skill(websocket, data) + elif msg_type == "delete_agent_local_skill": + await self._handle_delete_agent_local_skill(websocket, data) + elif msg_type == "remove_agent_skill": + await self._handle_remove_agent_skill(websocket, data) + elif msg_type == "update_agent_skill": + await self._handle_update_agent_skill(websocket, data) + elif msg_type == "get_agent_workspace_file": + await self._handle_get_agent_workspace_file(websocket, data) + elif msg_type == "update_agent_workspace_file": + await self._handle_update_agent_workspace_file(websocket, data) elif msg_type == "get_stock_history": await self._handle_get_stock_history(websocket, data) elif msg_type == "get_stock_explain_events": @@ -1036,6 +1085,53 @@ class Gateway: task.add_done_callback(self._handle_backtest_exception) self._backtest_task = task + async def _handle_manual_trigger( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Run one live/mock trading cycle on demand.""" + if self.is_backtest: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "Manual trigger 
is only available in live/mock mode.", + }, + ensure_ascii=False, + ), + ) + return + + if ( + self._cycle_lock.locked() + or ( + self._manual_cycle_task is not None + and not self._manual_cycle_task.done() + ) + ): + await websocket.send( + json.dumps( + { + "type": "error", + "message": "A trading cycle is already running.", + }, + ensure_ascii=False, + ), + ) + await self.state_sync.on_system_message("已有任务在运行,已忽略手动触发") + return + + requested_date = data.get("date") + await self.state_sync.on_system_message("收到手动触发请求,准备开始新一轮分析与决策") + task = asyncio.create_task( + self.on_strategy_trigger( + date=requested_date or datetime.now().strftime("%Y-%m-%d"), + ), + ) + task.add_done_callback(self._handle_manual_cycle_exception) + self._manual_cycle_task = task + async def _handle_reload_runtime_assets(self): """Reload prompt, skills, and safe runtime config without restart.""" config_name = self.config.get("config_name", "default") @@ -1043,6 +1139,9 @@ class Gateway: project_root=self._project_root, config_name=config_name, enable_memory=self.config.get("enable_memory", False), + schedule_mode=self.config.get("schedule_mode", "daily"), + interval_minutes=self.config.get("interval_minutes", 60), + trigger_time=self.config.get("trigger_time", "09:30"), ) result = self.pipeline.reload_runtime_assets(runtime_config=runtime_config) runtime_updates = self._apply_runtime_config(runtime_config) @@ -1057,6 +1156,107 @@ class Gateway: }, ) + async def _handle_update_runtime_config( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Persist selected runtime settings and hot-reload them.""" + updates: Dict[str, Any] = {} + + schedule_mode = str(data.get("schedule_mode", "")).strip().lower() + if schedule_mode: + if schedule_mode not in {"daily", "intraday"}: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "schedule_mode must be 'daily' or 'intraday'.", + }, + ensure_ascii=False, + ), + ) + return + updates["schedule_mode"] = 
schedule_mode + + interval_minutes = data.get("interval_minutes") + if interval_minutes is not None: + try: + parsed_interval = int(interval_minutes) + except (TypeError, ValueError): + parsed_interval = 0 + if parsed_interval <= 0: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "interval_minutes must be a positive integer.", + }, + ensure_ascii=False, + ), + ) + return + updates["interval_minutes"] = parsed_interval + + trigger_time = data.get("trigger_time") + if trigger_time is not None: + raw_trigger = str(trigger_time).strip() + if raw_trigger and raw_trigger != "now": + try: + datetime.strptime(raw_trigger, "%H:%M") + except ValueError: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "trigger_time must use HH:MM or 'now'.", + }, + ensure_ascii=False, + ), + ) + return + updates["trigger_time"] = raw_trigger or "09:30" + + max_comm_cycles = data.get("max_comm_cycles") + if max_comm_cycles is not None: + try: + parsed_cycles = int(max_comm_cycles) + except (TypeError, ValueError): + parsed_cycles = 0 + if parsed_cycles <= 0: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "max_comm_cycles must be a positive integer.", + }, + ensure_ascii=False, + ), + ) + return + updates["max_comm_cycles"] = parsed_cycles + + if not updates: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "No runtime settings were provided.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + update_bootstrap_values_for_run( + project_root=self._project_root, + config_name=config_name, + updates=updates, + ) + await self.state_sync.on_system_message("运行时调度配置已保存,正在热更新") + await self._handle_reload_runtime_assets() + async def _handle_update_watchlist( self, websocket: ServerConnection, @@ -1095,6 +1295,598 @@ class Gateway: await self._handle_reload_runtime_assets() self._schedule_watchlist_market_store_refresh(tickers) + async def 
_handle_get_agent_skills( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Return skill catalog and status for one agent.""" + agent_id = str(data.get("agent_id", "")).strip() + if not agent_id: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "get_agent_skills requires agent_id.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + agent_asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id) + agent_config = load_agent_workspace_config(agent_asset_dir / "agent.yaml") + resolved_skills = set( + skills_manager.resolve_agent_skill_names( + config_name=config_name, + agent_id=agent_id, + default_skills=[], + ), + ) + enabled = set(agent_config.enabled_skills) + disabled = set(agent_config.disabled_skills) + + payload = [] + for item in skills_manager.list_agent_skill_catalog(config_name, agent_id): + if item.skill_name in disabled: + status = "disabled" + elif item.skill_name in enabled: + status = "enabled" + elif item.skill_name in resolved_skills: + status = "active" + else: + status = "available" + payload.append( + { + "skill_name": item.skill_name, + "name": item.name, + "description": item.description, + "version": item.version, + "source": item.source, + "tools": item.tools, + "status": status, + }, + ) + + await websocket.send( + json.dumps( + { + "type": "agent_skills_loaded", + "config_name": config_name, + "agent_id": agent_id, + "skills": payload, + }, + ensure_ascii=False, + ), + ) + + async def _handle_get_agent_profile( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Return structured profile/config summary for one agent.""" + agent_id = str(data.get("agent_id", "")).strip() + if not agent_id: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "get_agent_profile requires agent_id.", + }, + 
ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id) + agent_config = load_agent_workspace_config(asset_dir / "agent.yaml") + profiles = load_agent_profiles() + profile = profiles.get(agent_id, {}) + bootstrap = get_bootstrap_config_for_run(self._project_root, config_name) + override = bootstrap.agent_override(agent_id) + active_tool_groups = override.get( + "active_tool_groups", + agent_config.active_tool_groups or profile.get("active_tool_groups", []), + ) + if not isinstance(active_tool_groups, list): + active_tool_groups = [] + disabled_tool_groups = agent_config.disabled_tool_groups + if disabled_tool_groups: + disabled_set = set(disabled_tool_groups) + active_tool_groups = [ + group_name + for group_name in active_tool_groups + if group_name not in disabled_set + ] + + default_skills = profile.get("skills", []) + if not isinstance(default_skills, list): + default_skills = [] + resolved_skills = skills_manager.resolve_agent_skill_names( + config_name=config_name, + agent_id=agent_id, + default_skills=default_skills, + ) + prompt_files = agent_config.prompt_files or [ + "SOUL.md", + "PROFILE.md", + "AGENTS.md", + "POLICY.md", + "MEMORY.md", + ] + model_name, model_provider = get_agent_model_info(agent_id) + + await websocket.send( + json.dumps( + { + "type": "agent_profile_loaded", + "config_name": config_name, + "agent_id": agent_id, + "profile": { + "model_name": model_name, + "model_provider": model_provider, + "prompt_files": prompt_files, + "default_skills": default_skills, + "resolved_skills": resolved_skills, + "active_tool_groups": active_tool_groups, + "disabled_tool_groups": disabled_tool_groups, + "enabled_skills": agent_config.enabled_skills, + "disabled_skills": agent_config.disabled_skills, + }, + }, + ensure_ascii=False, + ), + ) + + async def 
_handle_get_skill_detail( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Return full SKILL.md body for one skill.""" + agent_id = str(data.get("agent_id", "")).strip() + skill_name = str(data.get("skill_name", "")).strip() + if not skill_name: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "get_skill_detail requires skill_name.", + }, + ensure_ascii=False, + ), + ) + return + + skills_manager = SkillsManager(project_root=self._project_root) + try: + if agent_id: + config_name = self.config.get("config_name", "default") + detail = skills_manager.load_agent_skill_document( + config_name=config_name, + agent_id=agent_id, + skill_name=skill_name, + ) + else: + detail = skills_manager.load_skill_document(skill_name) + except FileNotFoundError: + await websocket.send( + json.dumps( + { + "type": "error", + "message": f"Unknown skill: {skill_name}", + }, + ensure_ascii=False, + ), + ) + return + + await websocket.send( + json.dumps( + { + "type": "skill_detail_loaded", + "agent_id": agent_id, + "skill": detail, + }, + ensure_ascii=False, + ), + ) + + async def _handle_create_agent_local_skill( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Create a new local skill for one agent and hot-reload runtime assets.""" + agent_id = str(data.get("agent_id", "")).strip() + skill_name = str(data.get("skill_name", "")).strip() + if not agent_id or not skill_name: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "create_agent_local_skill requires agent_id and skill_name.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + try: + skills_manager.create_agent_local_skill( + config_name=config_name, + agent_id=agent_id, + skill_name=skill_name, + ) + except (ValueError, FileExistsError) as exc: + await websocket.send( + json.dumps( + {"type": 
"error", "message": str(exc)}, + ensure_ascii=False, + ), + ) + return + + await self.state_sync.on_system_message( + f"Created local skill {skill_name} for {agent_id}", + ) + await self._handle_reload_runtime_assets() + await websocket.send( + json.dumps( + { + "type": "agent_local_skill_created", + "agent_id": agent_id, + "skill_name": skill_name, + }, + ensure_ascii=False, + ), + ) + await self._handle_get_agent_skills(websocket, {"agent_id": agent_id}) + await self._handle_get_skill_detail( + websocket, + {"agent_id": agent_id, "skill_name": skill_name}, + ) + + async def _handle_update_agent_local_skill( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Update one agent-local SKILL.md and hot-reload runtime assets.""" + agent_id = str(data.get("agent_id", "")).strip() + skill_name = str(data.get("skill_name", "")).strip() + content = data.get("content") + if not agent_id or not skill_name or not isinstance(content, str): + await websocket.send( + json.dumps( + { + "type": "error", + "message": "update_agent_local_skill requires agent_id, skill_name, and string content.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + try: + skills_manager.update_agent_local_skill( + config_name=config_name, + agent_id=agent_id, + skill_name=skill_name, + content=content, + ) + except (ValueError, FileNotFoundError) as exc: + await websocket.send( + json.dumps( + {"type": "error", "message": str(exc)}, + ensure_ascii=False, + ), + ) + return + + await self.state_sync.on_system_message( + f"Updated local skill {skill_name} for {agent_id}", + ) + await self._handle_reload_runtime_assets() + await websocket.send( + json.dumps( + { + "type": "agent_local_skill_updated", + "agent_id": agent_id, + "skill_name": skill_name, + }, + ensure_ascii=False, + ), + ) + await self._handle_get_skill_detail( + websocket, + {"agent_id": 
agent_id, "skill_name": skill_name}, + ) + + async def _handle_delete_agent_local_skill( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Delete one agent-local skill and hot-reload runtime assets.""" + agent_id = str(data.get("agent_id", "")).strip() + skill_name = str(data.get("skill_name", "")).strip() + if not agent_id or not skill_name: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "delete_agent_local_skill requires agent_id and skill_name.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + try: + skills_manager.delete_agent_local_skill( + config_name=config_name, + agent_id=agent_id, + skill_name=skill_name, + ) + skills_manager.forget_agent_skill_overrides( + config_name=config_name, + agent_id=agent_id, + skill_names=[skill_name], + ) + except (ValueError, FileNotFoundError) as exc: + await websocket.send( + json.dumps( + {"type": "error", "message": str(exc)}, + ensure_ascii=False, + ), + ) + return + + await self.state_sync.on_system_message( + f"Deleted local skill {skill_name} for {agent_id}", + ) + await self._handle_reload_runtime_assets() + await websocket.send( + json.dumps( + { + "type": "agent_local_skill_deleted", + "agent_id": agent_id, + "skill_name": skill_name, + }, + ensure_ascii=False, + ), + ) + await self._handle_get_agent_skills(websocket, {"agent_id": agent_id}) + + async def _handle_remove_agent_skill( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Remove one shared skill from the agent's installed set.""" + agent_id = str(data.get("agent_id", "")).strip() + skill_name = str(data.get("skill_name", "")).strip() + if not agent_id or not skill_name: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "remove_agent_skill requires agent_id and skill_name.", + }, + ensure_ascii=False, + ), + ) + return + + 
config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + skill_names = { + item.skill_name + for item in skills_manager.list_agent_skill_catalog(config_name, agent_id) + if item.source != "local" + } + if skill_name not in skill_names: + await websocket.send( + json.dumps( + {"type": "error", "message": f"Unknown shared skill: {skill_name}"}, + ensure_ascii=False, + ), + ) + return + + skills_manager.update_agent_skill_overrides( + config_name=config_name, + agent_id=agent_id, + disable=[skill_name], + ) + await self.state_sync.on_system_message( + f"Removed shared skill {skill_name} from {agent_id}", + ) + await self._handle_reload_runtime_assets() + await websocket.send( + json.dumps( + { + "type": "agent_skill_removed", + "agent_id": agent_id, + "skill_name": skill_name, + }, + ensure_ascii=False, + ), + ) + await self._handle_get_agent_skills(websocket, {"agent_id": agent_id}) + + async def _handle_update_agent_skill( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Enable or disable one skill for one agent and hot-reload assets.""" + agent_id = str(data.get("agent_id", "")).strip() + skill_name = str(data.get("skill_name", "")).strip() + enabled = data.get("enabled") + if not agent_id or not skill_name or not isinstance(enabled, bool): + await websocket.send( + json.dumps( + { + "type": "error", + "message": "update_agent_skill requires agent_id, skill_name, and boolean enabled.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + skill_names = { + item.skill_name + for item in skills_manager.list_agent_skill_catalog(config_name, agent_id) + } + if skill_name not in skill_names: + await websocket.send( + json.dumps( + { + "type": "error", + "message": f"Unknown skill: {skill_name}", + }, + ensure_ascii=False, + ), + ) + return + + if enabled: 
+ skills_manager.update_agent_skill_overrides( + config_name=config_name, + agent_id=agent_id, + enable=[skill_name], + ) + await self.state_sync.on_system_message( + f"Enabled skill {skill_name} for {agent_id}", + ) + else: + skills_manager.update_agent_skill_overrides( + config_name=config_name, + agent_id=agent_id, + disable=[skill_name], + ) + await self.state_sync.on_system_message( + f"Disabled skill {skill_name} for {agent_id}", + ) + + await websocket.send( + json.dumps( + { + "type": "agent_skill_updated", + "agent_id": agent_id, + "skill_name": skill_name, + "enabled": enabled, + }, + ensure_ascii=False, + ), + ) + await self._handle_reload_runtime_assets() + await self._handle_get_agent_skills( + websocket, + {"agent_id": agent_id}, + ) + + async def _handle_get_agent_workspace_file( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Load one editable agent workspace markdown file.""" + agent_id = str(data.get("agent_id", "")).strip() + filename = self._normalize_agent_workspace_filename(data.get("filename")) + if not agent_id or not filename: + await websocket.send( + json.dumps( + { + "type": "error", + "message": "get_agent_workspace_file requires agent_id and supported filename.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id) + asset_dir.mkdir(parents=True, exist_ok=True) + path = asset_dir / filename + content = path.read_text(encoding="utf-8") if path.exists() else "" + await websocket.send( + json.dumps( + { + "type": "agent_workspace_file_loaded", + "config_name": config_name, + "agent_id": agent_id, + "filename": filename, + "content": content, + }, + ensure_ascii=False, + ), + ) + + async def _handle_update_agent_workspace_file( + self, + websocket: ServerConnection, + data: Dict[str, Any], + ) -> None: + """Persist one 
editable agent workspace markdown file and hot-reload.""" + agent_id = str(data.get("agent_id", "")).strip() + filename = self._normalize_agent_workspace_filename(data.get("filename")) + content = data.get("content") + if not agent_id or not filename or not isinstance(content, str): + await websocket.send( + json.dumps( + { + "type": "error", + "message": "update_agent_workspace_file requires agent_id, supported filename, and string content.", + }, + ensure_ascii=False, + ), + ) + return + + config_name = self.config.get("config_name", "default") + skills_manager = SkillsManager(project_root=self._project_root) + asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id) + asset_dir.mkdir(parents=True, exist_ok=True) + path = asset_dir / filename + path.write_text(content, encoding="utf-8") + await self.state_sync.on_system_message( + f"Updated {filename} for {agent_id}", + ) + await websocket.send( + json.dumps( + { + "type": "agent_workspace_file_updated", + "agent_id": agent_id, + "filename": filename, + }, + ensure_ascii=False, + ), + ) + await self._handle_reload_runtime_assets() + await self._handle_get_agent_workspace_file( + websocket, + {"agent_id": agent_id, "filename": filename}, + ) + @staticmethod def _normalize_watchlist(raw_tickers: Any) -> List[str]: """Parse watchlist payloads from websocket messages.""" @@ -1115,6 +1907,14 @@ class Gateway: tickers.append(symbol) return tickers + @staticmethod + def _normalize_agent_workspace_filename(raw_name: Any) -> Optional[str]: + """Restrict editable workspace files to a safe allowlist.""" + filename = str(raw_name or "").strip() + if filename in EDITABLE_AGENT_WORKSPACE_FILES: + return filename + return None + def _apply_runtime_config( self, runtime_config: Dict[str, Any], @@ -1129,6 +1929,27 @@ class Gateway: self.pipeline.max_comm_cycles = int(runtime_config["max_comm_cycles"]) self.config["max_comm_cycles"] = self.pipeline.max_comm_cycles + self.config["schedule_mode"] = runtime_config.get( + 
"schedule_mode", + self.config.get("schedule_mode", "daily"), + ) + self.config["interval_minutes"] = int( + runtime_config.get( + "interval_minutes", + self.config.get("interval_minutes", 60), + ), + ) + self.config["trigger_time"] = runtime_config.get( + "trigger_time", + self.config.get("trigger_time", "09:30"), + ) + + if self.scheduler: + self.scheduler.reconfigure( + mode=self.config["schedule_mode"], + trigger_time=self.config["trigger_time"], + interval_minutes=self.config["interval_minutes"], + ) pm_apply_result = self.pipeline.pm.apply_runtime_portfolio_config( margin_requirement=runtime_config["margin_requirement"], @@ -1174,6 +1995,9 @@ class Gateway: "runtime_config_requested": runtime_config, "runtime_config_applied": { "tickers": list(self.config.get("tickers", [])), + "schedule_mode": self.config.get("schedule_mode", "daily"), + "interval_minutes": self.config.get("interval_minutes", 60), + "trigger_time": self.config.get("trigger_time", "09:30"), "initial_cash": self.storage.initial_cash, "margin_requirement": self.config["margin_requirement"], "max_comm_cycles": self.config["max_comm_cycles"], @@ -1181,6 +2005,9 @@ class Gateway: }, "runtime_config_status": { "tickers": True, + "schedule_mode": True, + "interval_minutes": True, + "trigger_time": True, "initial_cash": initial_cash_applied, "margin_requirement": pm_apply_result["margin_requirement"], "max_comm_cycles": True, @@ -1197,6 +2024,9 @@ class Gateway: "runtime_config", { "tickers": self.config.get("tickers", []), + "schedule_mode": self.config.get("schedule_mode", "daily"), + "interval_minutes": self.config.get("interval_minutes", 60), + "trigger_time": self.config.get("trigger_time", "09:30"), "initial_cash": self.storage.initial_cash, "margin_requirement": self.config.get("margin_requirement"), "max_comm_cycles": self.config.get("max_comm_cycles"), @@ -1384,14 +2214,22 @@ class Gateway: async def on_strategy_trigger(self, date: str): """Handle trading cycle trigger""" - 
logger.info(f"Strategy triggered for {date}") + if self._cycle_lock.locked(): + logger.warning("Trading cycle already running, skipping trigger for %s", date) + await self.state_sync.on_system_message( + f"已有交易周期在运行,跳过本次触发: {date}", + ) + return - tickers = self.config.get("tickers", []) + async with self._cycle_lock: + logger.info(f"Strategy triggered for {date}") - if self.is_backtest: - await self._run_backtest_cycle(date, tickers) - else: - await self._run_live_cycle(date, tickers) + tickers = self.config.get("tickers", []) + + if self.is_backtest: + await self._run_backtest_cycle(date, tickers) + else: + await self._run_live_cycle(date, tickers) async def _run_backtest_cycle(self, date: str, tickers: List[str]): """Run backtest cycle with pre-loaded prices""" @@ -1424,9 +2262,9 @@ class Gateway: Run live cycle with real market timing. - Analysis runs immediately - - Execution waits for market open - (or uses current prices if already open) - - Settlement waits for market close + - Daily mode waits for open/close as before + - Intraday mode executes only during market open + and skips trading outside market hours """ # Get actual trading date (might be next trading day if weekend) trading_date = self.market_service.get_live_trading_date() @@ -1438,17 +2276,40 @@ class Gateway: self._dashboard.update(date=trading_date, status="Analyzing...") market_caps = self._get_market_caps(tickers, trading_date) + schedule_mode = self.config.get("schedule_mode", "daily") + market_status = self.market_service.get_market_status() + current_prices = self.market_service.get_all_prices() - # Run pipeline with async price callbacks - result = await self.pipeline.run_cycle( - tickers=tickers, - date=trading_date, - market_caps=market_caps, - get_open_prices_fn=self.market_service.wait_for_open_prices, - get_close_prices_fn=self.market_service.wait_for_close_prices, - ) + if schedule_mode == "intraday": + execute_decisions = market_status.get("status") == "open" + if 
execute_decisions: + await self.state_sync.on_system_message( + "定时任务触发:当前处于交易时段,本轮将执行交易决策", + ) + else: + await self.state_sync.on_system_message( + "定时任务触发:当前非交易时段,本轮仅更新数据与分析,不执行交易", + ) + + result = await self.pipeline.run_cycle( + tickers=tickers, + date=trading_date, + prices=current_prices, + market_caps=market_caps, + execute_decisions=execute_decisions, + ) + close_prices = current_prices + else: + # Daily mode keeps the original full-session behavior + result = await self.pipeline.run_cycle( + tickers=tickers, + date=trading_date, + market_caps=market_caps, + get_open_prices_fn=self.market_service.wait_for_open_prices, + get_close_prices_fn=self.market_service.wait_for_close_prices, + ) + close_prices = self.market_service.get_all_prices() - close_prices = self.market_service.get_all_prices() settlement_result = result.get("settlement_result") self._save_cycle_results( result, @@ -1608,6 +2469,21 @@ class Gateway: exc_info=True, ) + def _handle_manual_cycle_exception(self, task: asyncio.Task): + """Handle exceptions from manually-triggered live cycles.""" + self._manual_cycle_task = None + try: + task.result() + except asyncio.CancelledError: + logger.info("Manual cycle task was cancelled") + except Exception as exc: + logger.error( + "Manual cycle task failed with exception:%s:%s", + type(exc).__name__, + exc, + exc_info=True, + ) + def set_backtest_dates(self, dates: List[str]): self.state_sync.set_backtest_dates(dates) if dates: diff --git a/backend/skills/__init__.py b/backend/skills/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/backend/skills/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/backend/skills/builtin/__init__.py b/backend/skills/builtin/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/backend/skills/builtin/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/backend/skills/builtin/fundamental_review/SKILL.md 
b/backend/skills/builtin/fundamental_review/SKILL.md index 95aab74..936de49 100644 --- a/backend/skills/builtin/fundamental_review/SKILL.md +++ b/backend/skills/builtin/fundamental_review/SKILL.md @@ -1,21 +1,22 @@ --- -name: fundamental_review -description: Review a company from a fundamentals-first perspective before issuing a trading signal. +name: 基本面分析 +description: 当用户要求“基本面分析”“看财务质量”“分析盈利能力”“判断公司质量”或“评估长期盈利韧性”时,应使用此技能。 +version: 1.0.0 --- -# Fundamental Review +# 基本面分析 -Use this skill when the task requires judging business quality, balance-sheet strength, profitability, or long-term earnings durability. +当用户希望从公司质量、资产负债表强度、盈利能力或长期盈利韧性出发判断标的时,使用这个技能。 -## Workflow +## 工作流程 -1. Check profitability, growth, financial health, and efficiency before forming a conclusion. -2. Separate durable business quality from short-term noise. -3. State what would invalidate the thesis. -4. End with a clear signal, confidence, and the main drivers behind that signal. +1. 在形成结论前,先检查盈利能力、成长性、财务健康度和经营效率。 +2. 区分可持续的业务质量和短期噪音。 +3. 明确指出会推翻当前判断的条件。 +4. 最终给出清晰的信号、置信度和主要驱动因素。 -## Guardrails +## 约束 -- Do not rely on one metric in isolation. -- Call out missing data explicitly. -- Prefer conservative conclusions when financial quality is mixed. +- 不要孤立依赖单一指标。 +- 缺失数据要明确指出。 +- 当财务质量优劣混杂时,优先给出保守结论。 diff --git a/backend/skills/builtin/portfolio_decisioning/SKILL.md b/backend/skills/builtin/portfolio_decisioning/SKILL.md index f4ad73e..68d3a71 100644 --- a/backend/skills/builtin/portfolio_decisioning/SKILL.md +++ b/backend/skills/builtin/portfolio_decisioning/SKILL.md @@ -1,21 +1,22 @@ --- -name: portfolio_decisioning -description: Synthesize analyst inputs and risk feedback into explicit portfolio decisions. +name: 组合决策 +description: 当用户要求“组合决策”“给出最终仓位”“整合分析结论”“输出交易决策”或“形成组合操作方案”时,应使用此技能。 +version: 1.0.0 --- -# Portfolio Decisioning +# 组合决策 -Use this skill when you are responsible for converting team analysis into final trades. +当用户需要把团队分析转化为最终交易决策时,使用这个技能。 -## Workflow +## 工作流程 -1. 
Read analyst conclusions and risk warnings before acting. -2. Evaluate the current portfolio, cash, and margin constraints. -3. Record one explicit decision per ticker using the decision tool. -4. Summarize the portfolio-level rationale after all decisions are recorded. +1. 行动前先阅读分析师结论和风险警示。 +2. 评估当前组合、现金和保证金约束。 +3. 使用决策工具为每个 ticker 记录一个明确决策。 +4. 在全部决策记录完成后,总结组合层面的整体理由。 -## Guardrails +## 约束 -- Position sizing must respect capital and margin limits. -- Prefer smaller size when analyst conviction and risk signals disagree. -- Do not leave a ticker undecided when the task expects a full slate of decisions. +- 仓位大小必须遵守资金和保证金限制。 +- 当分析师信心与风险信号不一致时,优先采用更小仓位。 +- 当任务要求完整决策清单时,不要让任何 ticker 处于未决状态。 diff --git a/backend/skills/builtin/risk_review/SKILL.md b/backend/skills/builtin/risk_review/SKILL.md index da4f93a..cee6fcb 100644 --- a/backend/skills/builtin/risk_review/SKILL.md +++ b/backend/skills/builtin/risk_review/SKILL.md @@ -1,21 +1,22 @@ --- -name: risk_review -description: Assess portfolio and market risks before final position sizing and execution. +name: 风险审查 +description: 当用户要求“风险审查”“看组合风险”“检查集中度”“评估波动风险”或“确认仓位风险边界”时,应使用此技能。 +version: 1.0.0 --- -# Risk Review +# 风险审查 -Use this skill when you must identify concentration, volatility, leverage, and scenario risks. +当用户需要识别集中度、波动率、杠杆和情景风险时,使用这个技能。 -## Workflow +## 工作流程 -1. Review the proposed exposure by ticker and theme. -2. Identify concentration, volatility, liquidity, and leverage concerns. -3. Rank warnings by severity. -4. Translate risk findings into concrete limits or cautions for the portfolio manager. +1. 按 ticker 和主题检查拟议敞口。 +2. 识别集中度、波动率、流动性和杠杆方面的风险点。 +3. 按严重程度排序风险警示。 +4. 将风险结论转化为给投资经理的具体限制或注意事项。 -## Guardrails +## 约束 -- Focus on actionable risk controls. -- Quantify limits when the available data supports it. -- Distinguish fatal blockers from manageable risks. 
+- 聚焦可执行的风险控制措施。 +- 当数据支持时尽量量化限制。 +- 明确区分致命阻断项和可管理风险。 diff --git a/backend/skills/builtin/sentiment_review/SKILL.md b/backend/skills/builtin/sentiment_review/SKILL.md index 71db6d5..313cf78 100644 --- a/backend/skills/builtin/sentiment_review/SKILL.md +++ b/backend/skills/builtin/sentiment_review/SKILL.md @@ -1,21 +1,22 @@ --- -name: sentiment_review -description: Analyze news flow, market psychology, and insider behavior for catalyst-driven signals. +name: 情绪分析 +description: 当用户要求“情绪分析”“看新闻情绪”“分析市场心理”“判断事件驱动信号”或“检查内幕行为”时,应使用此技能。 +version: 1.0.0 --- -# Sentiment Review +# 情绪分析 -Use this skill when the task depends on recent catalysts, news tone, or behavioral market signals. +当用户需要基于近期催化剂、新闻语气或行为层面的市场信号做判断时,使用这个技能。 -## Workflow +## 工作流程 -1. Review recent news and identify the dominant narrative. -2. Check insider activity for confirming or conflicting signals. -3. Separate durable sentiment shifts from transient noise. -4. Explain how sentiment changes the near-term trade outlook. +1. 回顾近期新闻并识别主导叙事。 +2. 检查内幕活动,寻找确认或冲突信号。 +3. 区分可持续的情绪变化和短暂噪音。 +4. 说明情绪如何改变短期交易展望。 -## Guardrails +## 约束 -- Do not confuse attention with conviction. -- Highlight when sentiment is strong but unsupported by fundamentals. -- Be explicit about catalyst timing risk. +- 不要把注意力误判为真实信念。 +- 当情绪很强但缺乏基本面支持时,要明确指出。 +- 对催化剂时间窗口风险要说清楚。 diff --git a/backend/skills/builtin/technical_review/SKILL.md b/backend/skills/builtin/technical_review/SKILL.md index 329c0a8..1cad414 100644 --- a/backend/skills/builtin/technical_review/SKILL.md +++ b/backend/skills/builtin/technical_review/SKILL.md @@ -1,21 +1,22 @@ --- -name: technical_review -description: Evaluate price action, momentum, and volatility to judge timing and market regime. +name: 技术分析 +description: 当用户要求“技术分析”“看走势”“判断入场时机”“分析动量”“评估波动率”或“判断市场状态”时,应使用此技能。 +version: 1.0.0 --- -# Technical Review +# 技术分析 -Use this skill when the task is sensitive to entry timing, trend quality, or short-term market structure. 
+当用户需要从入场时机、趋势质量或短期市场结构出发判断标的时,使用这个技能。 -## Workflow +## 工作流程 -1. Assess trend direction and strength. -2. Check momentum and mean-reversion conditions. -3. Review volatility before making aggressive recommendations. -4. Convert the setup into a trading view with explicit risk awareness. +1. 评估趋势方向和强度。 +2. 检查动量与均值回归条件。 +3. 在给出激进建议前先审视波动率。 +4. 将当前形态转化为带有明确风险意识的交易观点。 -## Guardrails +## 约束 -- Distinguish trend continuation from overshoot. -- Avoid strong conviction when signals conflict. -- Treat volatility as a sizing input, not only a directional input. +- 区分趋势延续和过度透支。 +- 当信号冲突时避免给出高确定性判断。 +- 将波动率视为仓位输入,而不仅仅是方向输入。 diff --git a/backend/skills/builtin/valuation_review/SKILL.md b/backend/skills/builtin/valuation_review/SKILL.md index 9cfa1ff..178f0cc 100644 --- a/backend/skills/builtin/valuation_review/SKILL.md +++ b/backend/skills/builtin/valuation_review/SKILL.md @@ -1,21 +1,31 @@ --- -name: valuation_review -description: Estimate fair value and margin of safety using multiple valuation lenses. +name: 估值分析 +description: 当用户要求“估值分析”“看合理价值”“判断高估低估”“测算安全边际”或“比较多种估值方法”时,应使用此技能。 +version: 1.0.0 --- -# Valuation Review +# 估值分析 -Use this skill when the task requires determining whether a stock is cheap, expensive, or fairly priced. +当用户需要判断一只股票是低估、高估还是定价合理时,使用这个技能。 -## Workflow +## 工作流程 -1. Use more than one valuation method when possible. -2. Compare intrinsic value estimates with current market pricing. -3. Explain the key assumptions behind the valuation view. -4. State the margin of safety and what could compress or expand it. +1. 条件允许时,使用不止一种估值方法。 +2. 对比内在价值估计与当前市场价格。 +3. 解释估值判断背后的关键假设。 +4. 明确安全边际,以及哪些因素会压缩或扩大它。 -## Guardrails +## 可复用资源 -- Treat valuation as a range, not a single precise number. -- Call out assumption sensitivity. -- Avoid high-confidence calls when inputs are sparse or unstable. 
+- `scripts/dcf_report.py` + 用于贴现现金流估值的确定性计算和报告生成。 +- `scripts/owner_earnings_report.py` + 用于 owner earnings 估值的确定性计算和报告生成。 +- `scripts/multiple_valuation_report.py` + 用于 EV/EBITDA 和 Residual Income 两类估值报告生成。 + +## 约束 + +- 将估值视为区间,而不是一个精确点值。 +- 明确说明假设敏感性。 +- 当输入稀疏或不稳定时,避免给出高置信度判断。 diff --git a/backend/skills/builtin/valuation_review/__init__.py b/backend/skills/builtin/valuation_review/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/backend/skills/builtin/valuation_review/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/backend/skills/builtin/valuation_review/scripts/__init__.py b/backend/skills/builtin/valuation_review/scripts/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/backend/skills/builtin/valuation_review/scripts/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/backend/skills/builtin/valuation_review/scripts/dcf_report.py b/backend/skills/builtin/valuation_review/scripts/dcf_report.py new file mode 100644 index 0000000..c4cb4cd --- /dev/null +++ b/backend/skills/builtin/valuation_review/scripts/dcf_report.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""Deterministic DCF report helpers for the valuation_review skill.""" + +from __future__ import annotations + +import json +from typing import Iterable + + +def build_dcf_report(rows: Iterable[dict], current_date: str) -> str: + """Render a DCF valuation report from normalized row inputs.""" + lines = [f"=== DCF Valuation Analysis ({current_date}) ===\n"] + + for row in rows: + error = row.get("error") + ticker = row["ticker"] + if error: + lines.append(f"{ticker}: {error}\n") + continue + + current_fcf = float(row["current_fcf"]) + growth_rate = float(row["growth_rate"]) + market_cap = float(row["market_cap"]) + discount_rate = float(row.get("discount_rate", 0.10)) + terminal_growth = float(row.get("terminal_growth", 0.03)) + num_years = int(row.get("num_years", 5)) + + pv_fcf = sum( + current_fcf + 
* (1 + growth_rate) ** year + / (1 + discount_rate) ** year + for year in range(1, num_years + 1) + ) + terminal_fcf = ( + current_fcf + * (1 + growth_rate) ** num_years + * (1 + terminal_growth) + ) + terminal_value = terminal_fcf / (discount_rate - terminal_growth) + pv_terminal = terminal_value / (1 + discount_rate) ** num_years + enterprise_value = pv_fcf + pv_terminal + value_gap = (enterprise_value - market_cap) / market_cap * 100 + + if value_gap > 20: + assessment = "SIGNIFICANTLY UNDERVALUED" + elif value_gap > 0: + assessment = "POTENTIALLY UNDERVALUED" + elif value_gap > -20: + assessment = "POTENTIALLY OVERVALUED" + else: + assessment = "SIGNIFICANTLY OVERVALUED" + + lines.append(f"{ticker}:") + lines.append(f" Current FCF: ${current_fcf:,.0f}") + lines.append(f" DCF Enterprise Value: ${enterprise_value:,.0f}") + lines.append(f" Market Cap: ${market_cap:,.0f}") + lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}") + lines.append("") + + return "\n".join(lines) + + +def main() -> None: + """Read normalized rows from stdin and emit a text report.""" + payload = json.load(__import__("sys").stdin) + print(build_dcf_report(payload["rows"], payload["current_date"])) + + +if __name__ == "__main__": + main() diff --git a/backend/skills/builtin/valuation_review/scripts/multiple_valuation_report.py b/backend/skills/builtin/valuation_review/scripts/multiple_valuation_report.py new file mode 100644 index 0000000..83fc19d --- /dev/null +++ b/backend/skills/builtin/valuation_review/scripts/multiple_valuation_report.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""Deterministic multiple-based valuation helpers for the valuation_review skill.""" + +from __future__ import annotations + +import json +from typing import Iterable + + +def build_ev_ebitda_report(rows: Iterable[dict], current_date: str) -> str: + """Render an EV/EBITDA valuation report from normalized row inputs.""" + lines = [f"=== EV/EBITDA Valuation ({current_date}) 
===\n"] + + for row in rows: + error = row.get("error") + ticker = row["ticker"] + if error: + lines.append(f"{ticker}: {error}\n") + continue + + current_multiple = float(row["current_multiple"]) + median_multiple = float(row["median_multiple"]) + current_ebitda = float(row["current_ebitda"]) + market_cap = float(row["market_cap"]) + net_debt = float(row["net_debt"]) + + implied_ev = median_multiple * current_ebitda + implied_equity = max(implied_ev - net_debt, 0.0) + value_gap = ( + (implied_equity - market_cap) / market_cap * 100 + if market_cap > 0 + else 0.0 + ) + multiple_discount = ( + (median_multiple - current_multiple) / median_multiple * 100 + ) + + if multiple_discount > 10: + assessment = "TRADING BELOW HISTORICAL MULTIPLE" + elif multiple_discount > -10: + assessment = "NEAR HISTORICAL AVERAGE" + else: + assessment = "TRADING ABOVE HISTORICAL MULTIPLE" + + lines.append(f"{ticker}:") + lines.append(f" Current EV/EBITDA: {current_multiple:.1f}x") + lines.append(f" Historical Median: {median_multiple:.1f}x") + lines.append(f" Multiple vs History: {multiple_discount:+.1f}%") + lines.append(f" Implied Equity Value: ${implied_equity:,.0f}") + lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}") + lines.append("") + + return "\n".join(lines) + + +def build_residual_income_report(rows: Iterable[dict], current_date: str) -> str: + """Render a residual income valuation report from normalized row inputs.""" + lines = [f"=== Residual Income Valuation ({current_date}) ===\n"] + + for row in rows: + error = row.get("error") + ticker = row["ticker"] + if error: + lines.append(f"{ticker}: {error}\n") + continue + + book_value = float(row["book_value"]) + initial_ri = float(row["initial_ri"]) + market_cap = float(row["market_cap"]) + cost_of_equity = float(row.get("cost_of_equity", 0.10)) + bv_growth = float(row.get("bv_growth", 0.03)) + terminal_growth = float(row.get("terminal_growth", 0.03)) + num_years = int(row.get("num_years", 5)) + margin_of_safety = 
float(row.get("margin_of_safety", 0.20)) + + pv_ri = sum( + initial_ri * (1 + bv_growth) ** year / (1 + cost_of_equity) ** year + for year in range(1, num_years + 1) + ) + terminal_ri = initial_ri * (1 + bv_growth) ** (num_years + 1) + terminal_value = terminal_ri / (cost_of_equity - terminal_growth) + pv_terminal = terminal_value / (1 + cost_of_equity) ** num_years + intrinsic_value = (book_value + pv_ri + pv_terminal) * ( + 1 - margin_of_safety + ) + value_gap = (intrinsic_value - market_cap) / market_cap * 100 + + lines.append(f"{ticker}:") + lines.append(f" Book Value: ${book_value:,.0f}") + lines.append(f" Residual Income: ${initial_ri:,.0f}") + lines.append( + f" Intrinsic Value (w/ 20% MoS): ${intrinsic_value:,.0f}", + ) + lines.append(f" Value Gap: {value_gap:+.1f}%") + lines.append("") + + return "\n".join(lines) + + +def main() -> None: + """Read normalized rows from stdin and emit one selected text report.""" + payload = json.load(__import__("sys").stdin) + mode = payload["mode"] + if mode == "ev_ebitda": + print(build_ev_ebitda_report(payload["rows"], payload["current_date"])) + return + if mode == "residual_income": + print(build_residual_income_report(payload["rows"], payload["current_date"])) + return + raise ValueError(f"Unsupported mode: {mode}") + + +if __name__ == "__main__": + main() diff --git a/backend/skills/builtin/valuation_review/scripts/owner_earnings_report.py b/backend/skills/builtin/valuation_review/scripts/owner_earnings_report.py new file mode 100644 index 0000000..bd02845 --- /dev/null +++ b/backend/skills/builtin/valuation_review/scripts/owner_earnings_report.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""Deterministic owner earnings valuation helpers for the valuation_review skill.""" + +from __future__ import annotations + +import json +from typing import Iterable + + +def build_owner_earnings_report(rows: Iterable[dict], current_date: str) -> str: + """Render an owner earnings valuation report from 
normalized row inputs.""" + lines = [f"=== Owner Earnings Valuation ({current_date}) ===\n"] + + for row in rows: + error = row.get("error") + ticker = row["ticker"] + if error: + lines.append(f"{ticker}: {error}\n") + continue + + owner_earnings = float(row["owner_earnings"]) + growth_rate = float(row["growth_rate"]) + market_cap = float(row["market_cap"]) + required_return = float(row.get("required_return", 0.15)) + margin_of_safety = float(row.get("margin_of_safety", 0.25)) + num_years = int(row.get("num_years", 5)) + + pv_earnings = sum( + owner_earnings + * (1 + growth_rate) ** year + / (1 + required_return) ** year + for year in range(1, num_years + 1) + ) + terminal_growth = min(growth_rate, 0.03) + terminal_earnings = ( + owner_earnings + * (1 + growth_rate) ** num_years + * (1 + terminal_growth) + ) + terminal_value = terminal_earnings / ( + required_return - terminal_growth + ) + pv_terminal = terminal_value / (1 + required_return) ** num_years + intrinsic_value = (pv_earnings + pv_terminal) * (1 - margin_of_safety) + value_gap = (intrinsic_value - market_cap) / market_cap * 100 + + if value_gap > 20: + assessment = "SIGNIFICANTLY UNDERVALUED" + elif value_gap > 0: + assessment = "POTENTIALLY UNDERVALUED" + elif value_gap > -20: + assessment = "POTENTIALLY OVERVALUED" + else: + assessment = "SIGNIFICANTLY OVERVALUED" + + lines.append(f"{ticker}:") + lines.append(f" Owner Earnings: ${owner_earnings:,.0f}") + lines.append( + f" Intrinsic Value (w/ 25% MoS): ${intrinsic_value:,.0f}", + ) + lines.append(f" Market Cap: ${market_cap:,.0f}") + lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}") + lines.append("") + + return "\n".join(lines) + + +def main() -> None: + """Read normalized rows from stdin and emit a text report.""" + payload = json.load(__import__("sys").stdin) + print(build_owner_earnings_report(payload["rows"], payload["current_date"])) + + +if __name__ == "__main__": + main() diff --git 
a/backend/skills/customized/portfolio_decisioning/SKILL.md b/backend/skills/customized/portfolio_decisioning/SKILL.md new file mode 100644 index 0000000..444cc21 --- /dev/null +++ b/backend/skills/customized/portfolio_decisioning/SKILL.md @@ -0,0 +1,21 @@ +--- +name: 组合决策 +description: 整合分析师观点与风险反馈,形成明确的组合层决策。 +--- + +# 组合决策 + +当你负责把团队分析转化为最终交易决策时,使用这个技能。 + +## 工作流程 + +1. 行动前先阅读分析师结论和风险警示。 +2. 评估当前组合、现金和保证金约束。 +3. 使用决策工具为每个 ticker 记录一个明确决策。 +4. 在全部决策记录完成后,总结组合层面的整体理由。 + +## 约束 + +- 仓位大小必须遵守资金和保证金限制。 +- 当分析师信心与风险信号不一致时,优先采用更小仓位。 +- 当任务要求完整决策清单时,不要让任何 ticker 处于未决状态。 diff --git a/backend/skills/customized/risk_review/SKILL.md b/backend/skills/customized/risk_review/SKILL.md new file mode 100644 index 0000000..11f0b87 --- /dev/null +++ b/backend/skills/customized/risk_review/SKILL.md @@ -0,0 +1,21 @@ +--- +name: 风险审查 +description: 在最终仓位和执行前,评估组合与市场风险。 +--- + +# 风险审查 + +当你需要识别集中度、波动率、杠杆和情景风险时,使用这个技能。 + +## 工作流程 + +1. 按 ticker 和主题检查拟议敞口。 +2. 识别集中度、波动率、流动性和杠杆方面的风险点。 +3. 按严重程度排序风险警示。 +4. 将风险结论转化为给投资经理的具体限制或注意事项。 + +## 约束 + +- 聚焦可执行的风险控制措施。 +- 当数据支持时尽量量化限制。 +- 明确区分致命阻断项和可管理风险。 diff --git a/backend/skills/customized/sentiment_review/SKILL.md b/backend/skills/customized/sentiment_review/SKILL.md new file mode 100644 index 0000000..2604497 --- /dev/null +++ b/backend/skills/customized/sentiment_review/SKILL.md @@ -0,0 +1,21 @@ +--- +name: 情绪分析 +description: 分析新闻流、市场心理和内幕行为,识别事件驱动型信号。 +--- + +# 情绪分析 + +当任务依赖近期催化剂、新闻语气或行为层面的市场信号时,使用这个技能。 + +## 工作流程 + +1. 回顾近期新闻并识别主导叙事。 +2. 检查内幕活动,寻找确认或冲突信号。 +3. 区分可持续的情绪变化和短暂噪音。 +4. 说明情绪如何改变短期交易展望。 + +## 约束 + +- 不要把注意力误判为真实信念。 +- 当情绪很强但缺乏基本面支持时,要明确指出。 +- 对催化剂时间窗口风险要说清楚。 diff --git a/backend/skills/customized/technical_review/SKILL.md b/backend/skills/customized/technical_review/SKILL.md new file mode 100644 index 0000000..e783c2a --- /dev/null +++ b/backend/skills/customized/technical_review/SKILL.md @@ -0,0 +1,21 @@ +--- +name: 技术分析 +description: 评估价格行为、动量和波动率,用于判断时机和市场状态。 +--- + +# 技术分析 + +当任务对入场时机、趋势质量或短期市场结构敏感时,使用这个技能。 + +## 工作流程 + +1. 
评估趋势方向和强度。 +2. 检查动量与均值回归条件。 +3. 在给出激进建议前先审视波动率。 +4. 将当前形态转化为带有明确风险意识的交易观点。 + +## 约束 + +- 区分趋势延续和过度透支。 +- 当信号冲突时避免给出高确定性判断。 +- 将波动率视为仓位输入,而不仅仅是方向输入。 diff --git a/backend/skills/customized/valuation_review/SKILL.md b/backend/skills/customized/valuation_review/SKILL.md new file mode 100644 index 0000000..7eea59e --- /dev/null +++ b/backend/skills/customized/valuation_review/SKILL.md @@ -0,0 +1,21 @@ +--- +name: 估值分析 +description: 使用多种估值视角评估合理价值和安全边际。 +--- + +# 估值分析 + +当任务需要判断一只股票是低估、高估还是定价合理时,使用这个技能。 + +## 工作流程 + +1. 条件允许时,使用不止一种估值方法。 +2. 对比内在价值估计与当前市场价格。 +3. 解释估值判断背后的关键假设。 +4. 明确安全边际,以及哪些因素会压缩或扩大它。 + +## 约束 + +- 将估值视为区间,而不是一个精确点值。 +- 明确说明假设敏感性。 +- 当输入稀疏或不稳定时,避免给出高置信度判断。 diff --git a/backend/tests/test_agent_workspace.py b/backend/tests/test_agent_workspace.py new file mode 100644 index 0000000..24afe70 --- /dev/null +++ b/backend/tests/test_agent_workspace.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- + +from backend.agents.prompt_factory import build_agent_system_prompt +from backend.agents.skills_manager import SkillsManager +from backend.agents.workspace_manager import WorkspaceManager + + +class _DummyToolkit: + def get_agent_skill_prompt(self): + return "" + + def get_activated_notes(self): + return "" + + +def test_workspace_manager_creates_extended_agent_files(tmp_path): + manager = WorkspaceManager(project_root=tmp_path) + + manager.initialize_default_assets( + config_name="demo", + agent_ids=["risk_manager"], + analyst_personas={}, + ) + + asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + assert (asset_dir / "SOUL.md").exists() + assert (asset_dir / "PROFILE.md").exists() + assert (asset_dir / "AGENTS.md").exists() + assert (asset_dir / "MEMORY.md").exists() + assert (asset_dir / "HEARTBEAT.md").exists() + assert (asset_dir / "agent.yaml").exists() + assert (asset_dir / "skills" / "installed").is_dir() + assert (asset_dir / "skills" / "active").is_dir() + assert (asset_dir / "skills" / "disabled").is_dir() + assert (asset_dir / "skills" / 
"local").is_dir() + + +def test_agent_workspace_config_controls_prompt_files(tmp_path, monkeypatch): + manager = WorkspaceManager(project_root=tmp_path) + manager.initialize_default_assets( + config_name="demo", + agent_ids=["risk_manager"], + analyst_personas={}, + ) + asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + (asset_dir / "SOUL.md").write_text("soul-line", encoding="utf-8") + (asset_dir / "PROFILE.md").write_text("profile-line", encoding="utf-8") + (asset_dir / "MEMORY.md").write_text("memory-line", encoding="utf-8") + (asset_dir / "agent.yaml").write_text( + "prompt_files:\n" + " - SOUL.md\n" + " - MEMORY.md\n", + encoding="utf-8", + ) + + from backend.agents import prompt_factory + + monkeypatch.setattr( + prompt_factory, + "SkillsManager", + lambda: SkillsManager(project_root=tmp_path), + ) + + prompt = build_agent_system_prompt( + agent_id="risk_manager", + config_name="demo", + toolkit=_DummyToolkit(), + ) + + assert "soul-line" in prompt + assert "memory-line" in prompt + assert "profile-line" not in prompt + + +def test_skills_manager_applies_agent_level_skill_toggles(tmp_path): + builtin_root = tmp_path / "backend" / "skills" / "builtin" + for skill_name in ("risk_review", "extra_guard"): + skill_dir = builtin_root / skill_name + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + f"# {skill_name}\n", + encoding="utf-8", + ) + + manager = WorkspaceManager(project_root=tmp_path) + manager.initialize_default_assets( + config_name="demo", + agent_ids=["risk_manager"], + analyst_personas={}, + ) + asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + (asset_dir / "agent.yaml").write_text( + "enabled_skills:\n" + " - extra_guard\n" + "disabled_skills:\n" + " - risk_review\n", + encoding="utf-8", + ) + + skills_manager = SkillsManager(project_root=tmp_path) + active_map = skills_manager.prepare_active_skills( + config_name="demo", + agent_defaults={"risk_manager": ["risk_review"]}, + ) 
+ + active_dirs = active_map["risk_manager"] + assert [path.name for path in active_dirs] == ["extra_guard"] + assert (asset_dir / "skills" / "installed" / "extra_guard" / "SKILL.md").exists() + assert (asset_dir / "skills" / "active" / "extra_guard" / "SKILL.md").exists() + assert (asset_dir / "skills" / "disabled" / "risk_review" / "SKILL.md").exists() + assert not (asset_dir / "skills" / "active" / "risk_review").exists() + + +def test_agent_local_skill_is_activated_from_agent_workspace(tmp_path): + manager = WorkspaceManager(project_root=tmp_path) + manager.initialize_default_assets( + config_name="demo", + agent_ids=["risk_manager"], + analyst_personas={}, + ) + asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + local_skill = asset_dir / "skills" / "local" / "local_guard" + local_skill.mkdir(parents=True, exist_ok=True) + (local_skill / "SKILL.md").write_text( + "---\nname: 本地风控\ndescription: local skill\nversion: 1.0.0\n---\n", + encoding="utf-8", + ) + + skills_manager = SkillsManager(project_root=tmp_path) + active_map = skills_manager.prepare_active_skills( + config_name="demo", + agent_defaults={"risk_manager": []}, + ) + + assert [path.name for path in active_map["risk_manager"]] == ["local_guard"] + assert (asset_dir / "skills" / "active" / "local_guard" / "SKILL.md").exists() + + +def test_prompt_includes_active_skill_metadata_summary(tmp_path, monkeypatch): + builtin_root = tmp_path / "backend" / "skills" / "builtin" + skill_dir = builtin_root / "extra_guard" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\n" + "name: extra_guard\n" + "description: This skill should be used when the user asks to \"run a risk check\".\n" + "version: 1.0.0\n" + "tools:\n" + " - risk_ops\n" + "---\n\n" + "# Extra Guard\n", + encoding="utf-8", + ) + + manager = WorkspaceManager(project_root=tmp_path) + manager.initialize_default_assets( + config_name="demo", + agent_ids=["risk_manager"], + analyst_personas={}, 
+ ) + asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + (asset_dir / "agent.yaml").write_text( + "enabled_skills:\n" + " - extra_guard\n", + encoding="utf-8", + ) + + skills_manager = SkillsManager(project_root=tmp_path) + skills_manager.prepare_active_skills( + config_name="demo", + agent_defaults={"risk_manager": []}, + ) + + from backend.agents import prompt_factory + + monkeypatch.setattr( + prompt_factory, + "SkillsManager", + lambda: SkillsManager(project_root=tmp_path), + ) + + prompt = build_agent_system_prompt( + agent_id="risk_manager", + config_name="demo", + toolkit=_DummyToolkit(), + ) + + assert "Active Skill Catalog" in prompt + assert "This skill should be used when the user asks to \"run a risk check\"." in prompt + assert "version: 1.0.0" in prompt + assert "risk_ops" not in prompt diff --git a/backend/tests/test_gateway_explain_handlers.py b/backend/tests/test_gateway_explain_handlers.py index f25feec..0f870c0 100644 --- a/backend/tests/test_gateway_explain_handlers.py +++ b/backend/tests/test_gateway_explain_handlers.py @@ -382,3 +382,341 @@ async def test_refresh_market_store_for_watchlist_emits_system_messages(monkeypa assert gateway.state_sync.system_messages[0] == "正在同步自选股市场数据: AAPL, MSFT" assert "自选股市场数据已同步:" in gateway.state_sync.system_messages[1] assert "AAPL prices=3 news=4" in gateway.state_sync.system_messages[1] + + +@pytest.mark.asyncio +async def test_handle_get_agent_skills_returns_statuses(tmp_path): + builtin_root = tmp_path / "backend" / "skills" / "builtin" + for name in ("risk_review", "extra_guard"): + skill_dir = builtin_root / name + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + f"---\nname: {name}\ndescription: {name} desc\n---\n", + encoding="utf-8", + ) + + agent_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + agent_dir.mkdir(parents=True, exist_ok=True) + (agent_dir / "agent.yaml").write_text( + "enabled_skills:\n" + " - extra_guard\n" + 
"disabled_skills:\n" + " - risk_review\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + await gateway._handle_get_agent_skills( + websocket, + {"agent_id": "risk_manager"}, + ) + + assert websocket.messages[-1]["type"] == "agent_skills_loaded" + statuses = { + row["skill_name"]: row["status"] + for row in websocket.messages[-1]["skills"] + } + assert statuses["extra_guard"] == "enabled" + assert statuses["risk_review"] == "disabled" + + +@pytest.mark.asyncio +async def test_handle_get_agent_profile_returns_model_and_tool_groups(monkeypatch, tmp_path): + agent_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" + agent_dir.mkdir(parents=True, exist_ok=True) + (agent_dir / "agent.yaml").write_text( + "prompt_files:\n" + " - SOUL.md\n" + " - MEMORY.md\n" + "active_tool_groups:\n" + " - risk_ops\n" + "disabled_tool_groups:\n" + " - legacy_group\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + monkeypatch.setattr( + gateway_module, + "load_agent_profiles", + lambda: {"risk_manager": {"skills": ["risk_review"], "active_tool_groups": ["risk_ops", "legacy_group"]}}, + ) + monkeypatch.setattr( + gateway_module, + "get_agent_model_info", + lambda agent_id: ("gpt-4o-mini", "OPENAI"), + ) + + class _Bootstrap: + @staticmethod + def agent_override(_agent_id): + return {} + + monkeypatch.setattr( + gateway_module, + "get_bootstrap_config_for_run", + lambda project_root, config_name: _Bootstrap(), + ) + + await gateway._handle_get_agent_profile( + websocket, + {"agent_id": "risk_manager"}, + ) + + assert websocket.messages[-1]["type"] == "agent_profile_loaded" + profile = websocket.messages[-1]["profile"] + assert profile["model_name"] == "gpt-4o-mini" + assert profile["model_provider"] == "OPENAI" + assert profile["prompt_files"] == 
["SOUL.md", "MEMORY.md"] + assert profile["active_tool_groups"] == ["risk_ops"] + assert profile["disabled_tool_groups"] == ["legacy_group"] + + +@pytest.mark.asyncio +async def test_handle_get_skill_detail_returns_markdown_body(tmp_path): + skill_dir = tmp_path / "backend" / "skills" / "builtin" / "risk_review" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\nname: 风险审查\ndescription: 说明\nversion: 1.0.0\n---\n# 风险审查\n\n完整正文\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + await gateway._handle_get_skill_detail( + websocket, + {"skill_name": "risk_review"}, + ) + + assert websocket.messages[-1]["type"] == "skill_detail_loaded" + assert websocket.messages[-1]["skill"]["name"] == "风险审查" + assert websocket.messages[-1]["skill"]["version"] == "1.0.0" + assert websocket.messages[-1]["skill"]["content"] == "# 风险审查\n\n完整正文" + + +@pytest.mark.asyncio +async def test_handle_get_skill_detail_prefers_agent_local_skill(tmp_path): + skill_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "skills" / "local" / "local_guard" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\nname: 本地风控\ndescription: 本地说明\nversion: 1.0.0\n---\n# 本地风控\n\n本地正文\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + await gateway._handle_get_skill_detail( + websocket, + {"agent_id": "risk_manager", "skill_name": "local_guard"}, + ) + + assert websocket.messages[-1]["type"] == "skill_detail_loaded" + assert websocket.messages[-1]["agent_id"] == "risk_manager" + assert websocket.messages[-1]["skill"]["source"] == "local" + assert websocket.messages[-1]["skill"]["content"] == "# 本地风控\n\n本地正文" + + +@pytest.mark.asyncio +async def test_handle_update_agent_skill_persists_and_returns_refresh(monkeypatch, tmp_path): + 
skill_dir = tmp_path / "backend" / "skills" / "builtin" / "extra_guard" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\nname: extra_guard\ndescription: desc\n---\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + async def _noop_reload(): + return None + + monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload) + + await gateway._handle_update_agent_skill( + websocket, + { + "agent_id": "risk_manager", + "skill_name": "extra_guard", + "enabled": True, + }, + ) + + assert websocket.messages[0]["type"] == "agent_skill_updated" + assert websocket.messages[-1]["type"] == "agent_skills_loaded" + agent_yaml = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "agent.yaml" + assert "extra_guard" in agent_yaml.read_text(encoding="utf-8") + + +@pytest.mark.asyncio +async def test_handle_create_and_update_agent_local_skill(monkeypatch, tmp_path): + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + async def _noop_reload(): + return None + + monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload) + + await gateway._handle_create_agent_local_skill( + websocket, + {"agent_id": "risk_manager", "skill_name": "local_guard"}, + ) + + assert websocket.messages[0]["type"] == "agent_local_skill_created" + assert websocket.messages[1]["type"] == "agent_skills_loaded" + assert websocket.messages[2]["type"] == "skill_detail_loaded" + target = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "skills" / "local" / "local_guard" / "SKILL.md" + assert target.exists() + + websocket.messages.clear() + await gateway._handle_update_agent_local_skill( + websocket, + { + "agent_id": "risk_manager", + "skill_name": "local_guard", + "content": "---\nname: 本地风控\ndescription: 更新后\nversion: 1.0.0\n---\n# 
本地风控\n\n更新正文\n", + }, + ) + + assert websocket.messages[0]["type"] == "agent_local_skill_updated" + assert websocket.messages[1]["type"] == "skill_detail_loaded" + assert "更新正文" in target.read_text(encoding="utf-8") + + +@pytest.mark.asyncio +async def test_handle_delete_agent_local_skill(monkeypatch, tmp_path): + skill_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "skills" / "local" / "local_guard" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\nname: 本地风控\ndescription: desc\nversion: 1.0.0\n---\n", + encoding="utf-8", + ) + agent_yaml = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "agent.yaml" + agent_yaml.parent.mkdir(parents=True, exist_ok=True) + agent_yaml.write_text( + "enabled_skills:\n" + " - local_guard\n" + "disabled_skills:\n" + " - local_guard\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + async def _noop_reload(): + return None + + monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload) + + await gateway._handle_delete_agent_local_skill( + websocket, + {"agent_id": "risk_manager", "skill_name": "local_guard"}, + ) + + assert websocket.messages[0]["type"] == "agent_local_skill_deleted" + assert websocket.messages[1]["type"] == "agent_skills_loaded" + assert not skill_dir.exists() + assert "local_guard" not in agent_yaml.read_text(encoding="utf-8") + + +@pytest.mark.asyncio +async def test_handle_remove_agent_skill_marks_disabled(monkeypatch, tmp_path): + skill_dir = tmp_path / "backend" / "skills" / "builtin" / "risk_review" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\nname: 风险审查\ndescription: desc\nversion: 1.0.0\n---\n", + encoding="utf-8", + ) + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + async 
def _noop_reload(): + return None + + monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload) + + await gateway._handle_remove_agent_skill( + websocket, + {"agent_id": "risk_manager", "skill_name": "risk_review"}, + ) + + assert websocket.messages[0]["type"] == "agent_skill_removed" + assert websocket.messages[1]["type"] == "agent_skills_loaded" + agent_yaml = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "agent.yaml" + assert "risk_review" in agent_yaml.read_text(encoding="utf-8") + + +@pytest.mark.asyncio +async def test_handle_get_agent_workspace_file_returns_content(tmp_path): + file_path = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "SOUL.md" + file_path.parent.mkdir(parents=True, exist_ok=True) + file_path.write_text("soul content", encoding="utf-8") + + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + await gateway._handle_get_agent_workspace_file( + websocket, + {"agent_id": "risk_manager", "filename": "SOUL.md"}, + ) + + assert websocket.messages[-1] == { + "type": "agent_workspace_file_loaded", + "config_name": "demo", + "agent_id": "risk_manager", + "filename": "SOUL.md", + "content": "soul content", + } + + +@pytest.mark.asyncio +async def test_handle_update_agent_workspace_file_persists_and_returns_refresh(monkeypatch, tmp_path): + gateway = make_gateway() + gateway.config["config_name"] = "demo" + gateway._project_root = tmp_path + websocket = DummyWebSocket() + + async def _noop_reload(): + return None + + monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload) + + await gateway._handle_update_agent_workspace_file( + websocket, + { + "agent_id": "risk_manager", + "filename": "SOUL.md", + "content": "updated soul", + }, + ) + + assert websocket.messages[0]["type"] == "agent_workspace_file_updated" + assert websocket.messages[-1]["type"] == "agent_workspace_file_loaded" + target = tmp_path / "runs" / 
"demo" / "agents" / "risk_manager" / "SOUL.md" + assert target.read_text(encoding="utf-8") == "updated soul" diff --git a/backend/tests/test_skills_cli.py b/backend/tests/test_skills_cli.py new file mode 100644 index 0000000..3011b3c --- /dev/null +++ b/backend/tests/test_skills_cli.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +from backend import cli +from backend.agents.skill_metadata import parse_skill_metadata +from backend.agents.skills_manager import SkillsManager + + +def test_parse_skill_metadata_extended_frontmatter(tmp_path): + skill_dir = tmp_path / "demo_skill" + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + "---\n" + "name: demo_skill\n" + "description: Demo description\n" + "tools:\n" + " - technical\n" + "---\n\n" + "# Demo Skill\n", + encoding="utf-8", + ) + + parsed = parse_skill_metadata(skill_dir, source="builtin") + + assert parsed.skill_name == "demo_skill" + assert parsed.description == "Demo description" + assert parsed.tools == ["technical"] + + +def test_update_agent_skill_overrides(tmp_path): + manager = SkillsManager(project_root=tmp_path) + asset_dir = manager.get_agent_asset_dir("demo", "risk_manager") + asset_dir.mkdir(parents=True, exist_ok=True) + (asset_dir / "agent.yaml").write_text( + "enabled_skills:\n" + " - risk_review\n" + "disabled_skills:\n" + " - old_skill\n", + encoding="utf-8", + ) + + result = manager.update_agent_skill_overrides( + config_name="demo", + agent_id="risk_manager", + enable=["extra_guard"], + disable=["risk_review"], + ) + + assert result["enabled_skills"] == ["extra_guard"] + assert result["disabled_skills"] == ["old_skill", "risk_review"] + + +def test_skills_enable_disable_and_list(monkeypatch, tmp_path): + builtin_root = tmp_path / "backend" / "skills" / "builtin" + for name in ("risk_review", "extra_guard"): + skill_dir = builtin_root / name + skill_dir.mkdir(parents=True, exist_ok=True) + (skill_dir / "SKILL.md").write_text( + f"---\nname: {name}\ndescription: 
{name} desc\n---\n", + encoding="utf-8", + ) + + printed = [] + monkeypatch.setattr(cli, "get_project_root", lambda: tmp_path) + monkeypatch.setattr(cli.console, "print", lambda value: printed.append(value)) + + cli.skills_enable(agent_id="risk_manager", skill="extra_guard", config_name="demo") + cli.skills_disable(agent_id="risk_manager", skill="risk_review", config_name="demo") + cli.skills_list(config_name="demo", agent_id="risk_manager") + + text_dump = "\n".join(str(item) for item in printed) + assert "Enabled" in text_dump + assert "Disabled" in text_dump + assert any(getattr(item, "title", None) == "Skill Catalog" for item in printed) diff --git a/backend/tests/test_valuation_scripts.py b/backend/tests/test_valuation_scripts.py new file mode 100644 index 0000000..c75f5fb --- /dev/null +++ b/backend/tests/test_valuation_scripts.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- + +from backend.agents.skills_manager import SkillsManager +from backend.skills.builtin.valuation_review.scripts.dcf_report import ( + build_dcf_report, +) +from backend.skills.builtin.valuation_review.scripts.multiple_valuation_report import ( + build_ev_ebitda_report, + build_residual_income_report, +) +from backend.skills.builtin.valuation_review.scripts.owner_earnings_report import ( + build_owner_earnings_report, +) + + +def test_build_dcf_report_renders_assessment(): + report = build_dcf_report( + [ + { + "ticker": "AAPL", + "current_fcf": 100.0, + "growth_rate": 0.05, + "market_cap": 900.0, + "discount_rate": 0.10, + "terminal_growth": 0.03, + "num_years": 5, + }, + ], + "2026-03-17", + ) + + assert "DCF Valuation Analysis (2026-03-17)" in report + assert "AAPL:" in report + assert "Market Cap: $900" in report + assert "Value Gap:" in report + + +def test_build_owner_earnings_report_handles_errors(): + report = build_owner_earnings_report( + [ + { + "ticker": "MSFT", + "error": "Negative owner earnings ($-50)", + }, + ], + "2026-03-17", + ) + + assert "MSFT: Negative owner earnings 
($-50)" in report + + +def test_multiple_valuation_reports_render_expected_sections(): + ev_report = build_ev_ebitda_report( + [ + { + "ticker": "NVDA", + "current_multiple": 18.0, + "median_multiple": 20.0, + "current_ebitda": 50.0, + "market_cap": 800.0, + "net_debt": 100.0, + }, + ], + "2026-03-17", + ) + residual_report = build_residual_income_report( + [ + { + "ticker": "META", + "book_value": 200.0, + "initial_ri": 30.0, + "market_cap": 300.0, + "cost_of_equity": 0.10, + "bv_growth": 0.03, + "terminal_growth": 0.03, + "num_years": 5, + "margin_of_safety": 0.20, + }, + ], + "2026-03-17", + ) + + assert "EV/EBITDA Valuation (2026-03-17)" in ev_report + assert "NVDA:" in ev_report + assert "Residual Income Valuation (2026-03-17)" in residual_report + assert "META:" in residual_report + + +def test_prepare_active_skills_copies_skill_scripts(tmp_path): + builtin_skill = tmp_path / "backend" / "skills" / "builtin" / "valuation_review" + scripts_dir = builtin_skill / "scripts" + scripts_dir.mkdir(parents=True, exist_ok=True) + (builtin_skill / "SKILL.md").write_text( + "---\nname: 估值分析\ndescription: desc\nversion: 1.0.0\n---\n", + encoding="utf-8", + ) + (scripts_dir / "dcf_report.py").write_text("print('ok')\n", encoding="utf-8") + + manager = SkillsManager(project_root=tmp_path) + active_map = manager.prepare_active_skills( + config_name="demo", + agent_defaults={"valuation_analyst": ["valuation_review"]}, + ) + + active_dir = active_map["valuation_analyst"][0] + assert (active_dir / "scripts" / "dcf_report.py").exists() diff --git a/backend/tools/analysis_tools.py b/backend/tools/analysis_tools.py index 35a6e43..60bc565 100644 --- a/backend/tools/analysis_tools.py +++ b/backend/tools/analysis_tools.py @@ -22,6 +22,16 @@ from agentscope.message import TextBlock from agentscope.tool import ToolResponse from backend.data.provider_utils import normalize_symbol +from backend.skills.builtin.valuation_review.scripts.dcf_report import ( + build_dcf_report, +) +from 
backend.skills.builtin.valuation_review.scripts.multiple_valuation_report import ( + build_ev_ebitda_report, + build_residual_income_report, +) +from backend.skills.builtin.valuation_review.scripts.owner_earnings_report import ( + build_owner_earnings_report, +) from backend.tools.data_tools import ( get_company_news, get_financial_metrics, @@ -814,7 +824,7 @@ def dcf_valuation_analysis( current_date = _resolved_date(current_date) tickers = _parse_tickers(tickers) - lines = [f"=== DCF Valuation Analysis ({current_date}) ===\n"] + rows = [] for ticker in tickers: metrics = get_financial_metrics( @@ -823,7 +833,7 @@ def dcf_valuation_analysis( limit=8, ) if not metrics: - lines.append(f"{ticker}: No financial metrics\n") + rows.append({"ticker": ticker, "error": "No financial metrics"}) continue line_items = search_line_items( @@ -838,56 +848,28 @@ def dcf_valuation_analysis( or not line_items[0].free_cash_flow or line_items[0].free_cash_flow <= 0 ): - lines.append(f"{ticker}: Invalid free cash flow data\n") + rows.append({"ticker": ticker, "error": "Invalid free cash flow data"}) continue market_cap = get_market_cap(ticker, current_date) if not market_cap: - lines.append(f"{ticker}: Market cap unavailable\n") + rows.append({"ticker": ticker, "error": "Market cap unavailable"}) continue m = metrics[0] - current_fcf = line_items[0].free_cash_flow - growth_rate = m.earnings_growth or 0.05 - discount_rate = 0.10 - terminal_growth = 0.03 - num_years = 5 - - # DCF calculation - pv_fcf = sum( - current_fcf - * (1 + growth_rate) ** year - / (1 + discount_rate) ** year - for year in range(1, num_years + 1) + rows.append( + { + "ticker": ticker, + "current_fcf": line_items[0].free_cash_flow, + "growth_rate": m.earnings_growth or 0.05, + "market_cap": market_cap, + "discount_rate": 0.10, + "terminal_growth": 0.03, + "num_years": 5, + }, ) - terminal_fcf = ( - current_fcf - * (1 + growth_rate) ** num_years - * (1 + terminal_growth) - ) - terminal_value = terminal_fcf / 
(discount_rate - terminal_growth) - pv_terminal = terminal_value / (1 + discount_rate) ** num_years - enterprise_value = pv_fcf + pv_terminal - value_gap = (enterprise_value - market_cap) / market_cap * 100 - # Assessment - if value_gap > 20: - assessment = "SIGNIFICANTLY UNDERVALUED" - elif value_gap > 0: - assessment = "POTENTIALLY UNDERVALUED" - elif value_gap > -20: - assessment = "POTENTIALLY OVERVALUED" - else: - assessment = "SIGNIFICANTLY OVERVALUED" - - lines.append(f"{ticker}:") - lines.append(f" Current FCF: ${current_fcf:,.0f}") - lines.append(f" DCF Enterprise Value: ${enterprise_value:,.0f}") - lines.append(f" Market Cap: ${market_cap:,.0f}") - lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}") - lines.append("") - - return _to_text_response("\n".join(lines)) + return _to_text_response(build_dcf_report(rows, current_date)) @safe @@ -911,7 +893,7 @@ def owner_earnings_valuation_analysis( current_date = _resolved_date(current_date) tickers = _parse_tickers(tickers) - lines = [f"=== Owner Earnings Valuation ({current_date}) ===\n"] + rows = [] for ticker in tickers: metrics = get_financial_metrics( @@ -920,7 +902,7 @@ def owner_earnings_valuation_analysis( limit=8, ) if not metrics: - lines.append(f"{ticker}: No financial metrics\n") + rows.append({"ticker": ticker, "error": "No financial metrics"}) continue line_items = search_line_items( @@ -936,12 +918,12 @@ def owner_earnings_valuation_analysis( limit=2, ) if len(line_items) < 2: - lines.append(f"{ticker}: Insufficient financial data\n") + rows.append({"ticker": ticker, "error": "Insufficient financial data"}) continue market_cap = get_market_cap(ticker, current_date) if not market_cap: - lines.append(f"{ticker}: Market cap unavailable\n") + rows.append({"ticker": ticker, "error": "Market cap unavailable"}) continue m = metrics[0] @@ -956,57 +938,27 @@ def owner_earnings_valuation_analysis( owner_earnings = net_income + depreciation - capex - wc_change if owner_earnings <= 0: - 
lines.append( - f"{ticker}: Negative owner earnings (${owner_earnings:,.0f})\n", + rows.append( + { + "ticker": ticker, + "error": f"Negative owner earnings (${owner_earnings:,.0f})", + }, ) continue - # Valuation - growth_rate = m.earnings_growth or 0.05 - required_return = 0.15 - margin_of_safety = 0.25 - num_years = 5 - - pv_earnings = sum( - owner_earnings - * (1 + growth_rate) ** year - / (1 + required_return) ** year - for year in range(1, num_years + 1) + rows.append( + { + "ticker": ticker, + "owner_earnings": owner_earnings, + "growth_rate": m.earnings_growth or 0.05, + "market_cap": market_cap, + "required_return": 0.15, + "margin_of_safety": 0.25, + "num_years": 5, + }, ) - terminal_growth = min(growth_rate, 0.03) - terminal_earnings = ( - owner_earnings - * (1 + growth_rate) ** num_years - * (1 + terminal_growth) - ) - terminal_value = terminal_earnings / ( - required_return - terminal_growth - ) - pv_terminal = terminal_value / (1 + required_return) ** num_years - intrinsic_value = (pv_earnings + pv_terminal) * (1 - margin_of_safety) - value_gap = (intrinsic_value - market_cap) / market_cap * 100 - - # Assessment - if value_gap > 20: - assessment = "SIGNIFICANTLY UNDERVALUED" - elif value_gap > 0: - assessment = "POTENTIALLY UNDERVALUED" - elif value_gap > -20: - assessment = "POTENTIALLY OVERVALUED" - else: - assessment = "SIGNIFICANTLY OVERVALUED" - - lines.append(f"{ticker}:") - lines.append(f" Owner Earnings: ${owner_earnings:,.0f}") - lines.append( - f" Intrinsic Value (w/ 25% MoS): ${intrinsic_value:,.0f}", - ) - lines.append(f" Market Cap: ${market_cap:,.0f}") - lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}") - lines.append("") - - return _to_text_response("\n".join(lines)) + return _to_text_response(build_owner_earnings_report(rows, current_date)) @safe @@ -1030,7 +982,7 @@ def ev_ebitda_valuation_analysis( current_date = _resolved_date(current_date) tickers = _parse_tickers(tickers) - lines = [f"=== EV/EBITDA Valuation 
({current_date}) ===\n"] + rows = [] for ticker in tickers: metrics = get_financial_metrics( @@ -1039,7 +991,7 @@ def ev_ebitda_valuation_analysis( limit=8, ) if not metrics: - lines.append(f"{ticker}: No financial metrics\n") + rows.append({"ticker": ticker, "error": "No financial metrics"}) continue m = metrics[0] @@ -1048,12 +1000,12 @@ def ev_ebitda_valuation_analysis( or not m.enterprise_value_to_ebitda_ratio or m.enterprise_value_to_ebitda_ratio <= 0 ): - lines.append(f"{ticker}: Missing EV/EBITDA data\n") + rows.append({"ticker": ticker, "error": "Missing EV/EBITDA data"}) continue market_cap = get_market_cap(ticker, current_date) if not market_cap: - lines.append(f"{ticker}: Market cap unavailable\n") + rows.append({"ticker": ticker, "error": "Market cap unavailable"}) continue current_ebitda = ( @@ -1067,42 +1019,21 @@ def ev_ebitda_valuation_analysis( and x.enterprise_value_to_ebitda_ratio > 0 ] if len(valid_multiples) < 3: - lines.append(f"{ticker}: Insufficient historical data\n") + rows.append({"ticker": ticker, "error": "Insufficient historical data"}) continue - median_multiple = median(valid_multiples) - current_multiple = m.enterprise_value_to_ebitda_ratio - - implied_ev = median_multiple * current_ebitda - net_debt = m.enterprise_value - market_cap - implied_equity = max(implied_ev - net_debt, 0) - - value_gap = ( - (implied_equity - market_cap) / market_cap * 100 - if market_cap > 0 - else 0 - ) - multiple_discount = ( - (median_multiple - current_multiple) / median_multiple * 100 + rows.append( + { + "ticker": ticker, + "current_multiple": m.enterprise_value_to_ebitda_ratio, + "median_multiple": median(valid_multiples), + "current_ebitda": current_ebitda, + "market_cap": market_cap, + "net_debt": m.enterprise_value - market_cap, + }, ) - # Assessment - if multiple_discount > 10: - assessment = "TRADING BELOW HISTORICAL MULTIPLE" - elif multiple_discount > -10: - assessment = "NEAR HISTORICAL AVERAGE" - else: - assessment = "TRADING ABOVE 
HISTORICAL MULTIPLE" - - lines.append(f"{ticker}:") - lines.append(f" Current EV/EBITDA: {current_multiple:.1f}x") - lines.append(f" Historical Median: {median_multiple:.1f}x") - lines.append(f" Multiple vs History: {multiple_discount:+.1f}%") - lines.append(f" Implied Equity Value: ${implied_equity:,.0f}") - lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}") - lines.append("") - - return _to_text_response("\n".join(lines)) + return _to_text_response(build_ev_ebitda_report(rows, current_date)) @safe @@ -1126,7 +1057,7 @@ def residual_income_valuation_analysis( current_date = _resolved_date(current_date) tickers = _parse_tickers(tickers) - lines = [f"=== Residual Income Valuation ({current_date}) ===\n"] + rows = [] for ticker in tickers: metrics = get_financial_metrics( @@ -1135,7 +1066,7 @@ def residual_income_valuation_analysis( limit=8, ) if not metrics: - lines.append(f"{ticker}: No financial metrics\n") + rows.append({"ticker": ticker, "error": "No financial metrics"}) continue line_items = search_line_items( @@ -1146,59 +1077,44 @@ def residual_income_valuation_analysis( limit=1, ) if not line_items or not line_items[0].net_income: - lines.append(f"{ticker}: No net income data\n") + rows.append({"ticker": ticker, "error": "No net income data"}) continue market_cap = get_market_cap(ticker, current_date) if not market_cap: - lines.append(f"{ticker}: Market cap unavailable\n") + rows.append({"ticker": ticker, "error": "Market cap unavailable"}) continue m = metrics[0] if not m.price_to_book_ratio or m.price_to_book_ratio <= 0: - lines.append(f"{ticker}: Invalid P/B ratio\n") + rows.append({"ticker": ticker, "error": "Invalid P/B ratio"}) continue net_income = line_items[0].net_income pb_ratio = m.price_to_book_ratio book_value = market_cap / pb_ratio - # Model parameters cost_of_equity = 0.10 - bv_growth = m.book_value_growth or 0.03 - terminal_growth = 0.03 - num_years = 5 - margin_of_safety = 0.20 - initial_ri = net_income - cost_of_equity * 
book_value if initial_ri <= 0: - lines.append(f"{ticker}: Negative residual income\n") + rows.append({"ticker": ticker, "error": "Negative residual income"}) continue - # PV calculation - pv_ri = sum( - initial_ri * (1 + bv_growth) ** year / (1 + cost_of_equity) ** year - for year in range(1, num_years + 1) + rows.append( + { + "ticker": ticker, + "book_value": book_value, + "initial_ri": initial_ri, + "market_cap": market_cap, + "cost_of_equity": cost_of_equity, + "bv_growth": m.book_value_growth or 0.03, + "terminal_growth": 0.03, + "num_years": 5, + "margin_of_safety": 0.20, + }, ) - terminal_ri = initial_ri * (1 + bv_growth) ** (num_years + 1) - terminal_value = terminal_ri / (cost_of_equity - terminal_growth) - pv_terminal = terminal_value / (1 + cost_of_equity) ** num_years - intrinsic_value = (book_value + pv_ri + pv_terminal) * ( - 1 - margin_of_safety - ) - value_gap = (intrinsic_value - market_cap) / market_cap * 100 - - lines.append(f"{ticker}:") - lines.append(f" Book Value: ${book_value:,.0f}") - lines.append(f" Residual Income: ${initial_ri:,.0f}") - lines.append( - f" Intrinsic Value (w/ 20% MoS): ${intrinsic_value:,.0f}", - ) - lines.append(f" Value Gap: {value_gap:+.1f}%") - lines.append("") - - return _to_text_response("\n".join(lines)) + return _to_text_response(build_residual_income_report(rows, current_date)) # Tool Registry for dynamic toolkit creation diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 014baa5..f1bf340 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -17,6 +17,7 @@ import NetValueChart from './components/NetValueChart'; import StockLogo from './components/StockLogo'; import Header from './components/Header.jsx'; import WatchlistPanel from './components/WatchlistPanel.jsx'; +import RuntimeSettingsPanel from './components/RuntimeSettingsPanel.jsx'; // Utils import { formatNumber, formatTickerPrice } from './utils/formatters'; @@ -25,6 +26,8 @@ const RoomView = lazy(() => 
import('./components/RoomView')); const AgentFeed = lazy(() => import('./components/AgentFeed')); const StatisticsView = lazy(() => import('./components/StatisticsView')); const StockExplainView = lazy(() => import('./components/StockExplainView.jsx')); +const TraderView = lazy(() => import('./components/TraderView.jsx')); +const EDITABLE_AGENT_WORKSPACE_FILES = ['SOUL.md', 'PROFILE.md', 'AGENTS.md', 'MEMORY.md', 'POLICY.md', 'HEARTBEAT.md', 'ROLE.md', 'STYLE.md']; function ViewLoadingFallback({ label = '加载中...' }) { return ( @@ -61,8 +64,8 @@ export default function LiveTradingApp() { const [progress, setProgress] = useState({ current: 0, total: 0 }); const [now, setNow] = useState(() => new Date()); - // View toggle: 'room' | 'explain' | 'chart' | 'statistics' - const [currentView, setCurrentView] = useState('chart'); // Start with chart, then animate to room + // View toggle: 'traders' | 'room' | 'explain' | 'chart' | 'statistics' + const [currentView, setCurrentView] = useState('traders'); const [isInitialAnimating, setIsInitialAnimating] = useState(true); const [lastUpdate, setLastUpdate] = useState(new Date()); const [isUpdating, setIsUpdating] = useState(false); @@ -112,15 +115,38 @@ export default function LiveTradingApp() { const [dataSources, setDataSources] = useState(null); const [runtimeConfig, setRuntimeConfig] = useState(null); const [isWatchlistPanelOpen, setIsWatchlistPanelOpen] = useState(false); + const [isRuntimeSettingsOpen, setIsRuntimeSettingsOpen] = useState(false); const [watchlistDraftSymbols, setWatchlistDraftSymbols] = useState([]); const [watchlistInputValue, setWatchlistInputValue] = useState(''); const [watchlistFeedback, setWatchlistFeedback] = useState(null); const [isWatchlistSaving, setIsWatchlistSaving] = useState(false); + const [scheduleModeDraft, setScheduleModeDraft] = useState('daily'); + const [intervalMinutesDraft, setIntervalMinutesDraft] = useState('60'); + const [triggerTimeDraft, setTriggerTimeDraft] = 
useState('09:30'); + const [maxCommCyclesDraft, setMaxCommCyclesDraft] = useState('2'); + const [runtimeConfigFeedback, setRuntimeConfigFeedback] = useState(null); + const [isRuntimeConfigSaving, setIsRuntimeConfigSaving] = useState(false); + const [selectedSkillAgentId, setSelectedSkillAgentId] = useState(AGENTS[0]?.id || 'portfolio_manager'); + const [agentProfilesByAgent, setAgentProfilesByAgent] = useState({}); + const [agentSkillsByAgent, setAgentSkillsByAgent] = useState({}); + const [skillDetailsByName, setSkillDetailsByName] = useState({}); + const [localSkillDraftsByKey, setLocalSkillDraftsByKey] = useState({}); + const [isAgentSkillsLoading, setIsAgentSkillsLoading] = useState(false); + const [skillDetailLoadingKey, setSkillDetailLoadingKey] = useState(null); + const [agentSkillsSavingKey, setAgentSkillsSavingKey] = useState(null); + const [agentSkillsFeedback, setAgentSkillsFeedback] = useState(null); + const [selectedWorkspaceFile, setSelectedWorkspaceFile] = useState(EDITABLE_AGENT_WORKSPACE_FILES[0]); + const [workspaceFilesByAgent, setWorkspaceFilesByAgent] = useState({}); + const [workspaceDraftContent, setWorkspaceDraftContent] = useState(''); + const [isWorkspaceFileLoading, setIsWorkspaceFileLoading] = useState(false); + const [workspaceFileSavingKey, setWorkspaceFileSavingKey] = useState(null); + const [workspaceFileFeedback, setWorkspaceFileFeedback] = useState(null); const clientRef = useRef(null); const containerRef = useRef(null); const agentFeedRef = useRef(null); const isWatchlistSavingRef = useRef(false); + const isRuntimeConfigSavingRef = useRef(false); const requestedStockHistoryRef = useRef(new Set()); // Track last virtual time update to calculate increment @@ -220,6 +246,38 @@ export default function LiveTradingApp() { .filter((symbol) => typeof symbol === 'string' && symbol.trim()); }, [displayTickers, runtimeConfig]); + const runtimeSummaryLabel = useMemo(() => { + if (!runtimeConfig) { + return null; + } + + const scheduleMode = 
String(runtimeConfig.schedule_mode || 'daily'); + const intervalMinutes = Number(runtimeConfig.interval_minutes || 60); + const triggerTime = String(runtimeConfig.trigger_time || '09:30'); + const maxCommCycles = Number(runtimeConfig.max_comm_cycles || 2); + + if (scheduleMode === 'intraday') { + return `调度 intraday / ${intervalMinutes}m / 讨论 ${maxCommCycles} 轮`; + } + + return `调度 daily / ${triggerTime} ET / 讨论 ${maxCommCycles} 轮`; + }, [runtimeConfig]); + + const selectedAgentSkills = useMemo( + () => agentSkillsByAgent[selectedSkillAgentId] || [], + [agentSkillsByAgent, selectedSkillAgentId] + ); + + const selectedAgentProfile = useMemo( + () => agentProfilesByAgent[selectedSkillAgentId] || null, + [agentProfilesByAgent, selectedSkillAgentId] + ); + + const selectedWorkspaceContent = useMemo( + () => workspaceFilesByAgent[selectedSkillAgentId]?.[selectedWorkspaceFile] || '', + [selectedSkillAgentId, selectedWorkspaceFile, workspaceFilesByAgent] + ); + useEffect(() => { const symbols = displayTickers .map((ticker) => ticker.symbol) @@ -235,6 +293,17 @@ export default function LiveTradingApp() { } }, [displayTickers, selectedExplainSymbol]); + useEffect(() => { + if (!runtimeConfig) { + return; + } + + setScheduleModeDraft(String(runtimeConfig.schedule_mode || 'daily')); + setIntervalMinutesDraft(String(runtimeConfig.interval_minutes || 60)); + setTriggerTimeDraft(String(runtimeConfig.trigger_time || '09:30')); + setMaxCommCyclesDraft(String(runtimeConfig.max_comm_cycles || 2)); + }, [runtimeConfig]); + const watchlistSuggestions = useMemo( () => INITIAL_TICKERS.map((ticker) => ticker.symbol).filter((symbol, index, list) => list.indexOf(symbol) === index), [] @@ -350,6 +419,7 @@ export default function LiveTradingApp() { }, [watchlistFeedback]); const handleWatchlistPanelToggle = useCallback(() => { + setIsRuntimeSettingsOpen(false); setIsWatchlistPanelOpen((open) => { const nextOpen = !open; if (nextOpen) { @@ -425,6 +495,292 @@ export default function 
LiveTradingApp() { } }, [parseWatchlistInput, watchlistDraftSymbols, watchlistInputValue]); + const handleManualTrigger = useCallback(() => { + if (!clientRef.current) { + addSystemMessage('连接未就绪,无法手动触发'); + return; + } + + const success = clientRef.current.send({ + type: 'trigger_strategy' + }); + + if (!success) { + addSystemMessage('手动触发发送失败,请检查连接状态'); + return; + } + + addSystemMessage('已发送手动触发请求'); + }, [addSystemMessage]); + + const handleRuntimeConfigSave = useCallback(() => { + if (!clientRef.current) { + setRuntimeConfigFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + + const interval = Number(intervalMinutesDraft); + const maxCommCycles = Number(maxCommCyclesDraft); + if (!Number.isInteger(interval) || interval <= 0) { + setRuntimeConfigFeedback({ type: 'error', text: '间隔必须是正整数分钟' }); + return; + } + if (!Number.isInteger(maxCommCycles) || maxCommCycles <= 0) { + setRuntimeConfigFeedback({ type: 'error', text: '讨论轮数必须是正整数' }); + return; + } + + setIsRuntimeConfigSaving(true); + setRuntimeConfigFeedback(null); + const success = clientRef.current.send({ + type: 'update_runtime_config', + schedule_mode: scheduleModeDraft, + interval_minutes: interval, + trigger_time: triggerTimeDraft, + max_comm_cycles: maxCommCycles + }); + + if (!success) { + setIsRuntimeConfigSaving(false); + setRuntimeConfigFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [intervalMinutesDraft, maxCommCyclesDraft, scheduleModeDraft, triggerTimeDraft]); + + const handleRuntimeDefaultsRestore = useCallback(() => { + setScheduleModeDraft('daily'); + setIntervalMinutesDraft('60'); + setTriggerTimeDraft('09:30'); + setMaxCommCyclesDraft('2'); + setRuntimeConfigFeedback(null); + }, []); + + const handleRuntimeSettingsToggle = useCallback(() => { + setRuntimeConfigFeedback(null); + setAgentSkillsFeedback(null); + setWorkspaceFileFeedback(null); + setIsRuntimeSettingsOpen((prev) => !prev); + setIsWatchlistPanelOpen(false); + }, []); + + const requestAgentSkills = 
useCallback((agentId) => { + const normalized = typeof agentId === 'string' ? agentId.trim() : ''; + if (!normalized || !clientRef.current) { + return false; + } + setIsAgentSkillsLoading(true); + setAgentSkillsFeedback(null); + return clientRef.current.send({ + type: 'get_agent_skills', + agent_id: normalized + }); + }, []); + + const requestAgentProfile = useCallback((agentId) => { + const normalized = typeof agentId === 'string' ? agentId.trim() : ''; + if (!normalized || !clientRef.current) { + return false; + } + return clientRef.current.send({ + type: 'get_agent_profile', + agent_id: normalized + }); + }, []); + + const requestSkillDetail = useCallback((skillName) => { + const normalized = typeof skillName === 'string' ? skillName.trim() : ''; + if (!normalized || !clientRef.current) { + return false; + } + const detailKey = `${selectedSkillAgentId}:${normalized}`; + setSkillDetailLoadingKey(detailKey); + return clientRef.current.send({ + type: 'get_skill_detail', + agent_id: selectedSkillAgentId, + skill_name: normalized + }); + }, [selectedSkillAgentId]); + + const handleCreateLocalSkill = useCallback((skillName) => { + const normalized = typeof skillName === 'string' ? 
skillName.trim() : ''; + if (!normalized) { + setAgentSkillsFeedback({ type: 'error', text: '技能名称不能为空' }); + return; + } + if (!clientRef.current) { + setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + setAgentSkillsSavingKey(`${selectedSkillAgentId}:${normalized}:create`); + setAgentSkillsFeedback(null); + const success = clientRef.current.send({ + type: 'create_agent_local_skill', + agent_id: selectedSkillAgentId, + skill_name: normalized + }); + if (!success) { + setAgentSkillsSavingKey(null); + setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [selectedSkillAgentId]); + + const handleLocalSkillDraftChange = useCallback((skillName, content) => { + const detailKey = `${selectedSkillAgentId}:${skillName}`; + setLocalSkillDraftsByKey((prev) => ({ + ...prev, + [detailKey]: content + })); + }, [selectedSkillAgentId]); + + const handleLocalSkillSave = useCallback((skillName) => { + if (!clientRef.current) { + setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + const detailKey = `${selectedSkillAgentId}:${skillName}`; + const content = localSkillDraftsByKey[detailKey]; + if (typeof content !== 'string') { + return; + } + setAgentSkillsSavingKey(`${selectedSkillAgentId}:${skillName}:content`); + setAgentSkillsFeedback(null); + const success = clientRef.current.send({ + type: 'update_agent_local_skill', + agent_id: selectedSkillAgentId, + skill_name: skillName, + content + }); + if (!success) { + setAgentSkillsSavingKey(null); + setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [localSkillDraftsByKey, selectedSkillAgentId]); + + const handleLocalSkillDelete = useCallback((skillName) => { + if (!clientRef.current) { + setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + setAgentSkillsSavingKey(`${selectedSkillAgentId}:${skillName}:delete`); + setAgentSkillsFeedback(null); + const success = clientRef.current.send({ + type: 
'delete_agent_local_skill', + agent_id: selectedSkillAgentId, + skill_name: skillName + }); + if (!success) { + setAgentSkillsSavingKey(null); + setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [selectedSkillAgentId]); + + const handleRemoveSharedSkill = useCallback((skillName) => { + if (!clientRef.current) { + setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + setAgentSkillsSavingKey(`${selectedSkillAgentId}:${skillName}:remove`); + setAgentSkillsFeedback(null); + const success = clientRef.current.send({ + type: 'remove_agent_skill', + agent_id: selectedSkillAgentId, + skill_name: skillName + }); + if (!success) { + setAgentSkillsSavingKey(null); + setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [selectedSkillAgentId]); + + const requestWorkspaceFile = useCallback((agentId, filename) => { + const normalizedAgentId = typeof agentId === 'string' ? agentId.trim() : ''; + const normalizedFilename = typeof filename === 'string' ? 
filename.trim() : ''; + if (!normalizedAgentId || !normalizedFilename || !clientRef.current) { + return false; + } + setIsWorkspaceFileLoading(true); + setWorkspaceFileFeedback(null); + return clientRef.current.send({ + type: 'get_agent_workspace_file', + agent_id: normalizedAgentId, + filename: normalizedFilename + }); + }, []); + + const handleAgentSkillToggle = useCallback((skillName, enabled) => { + if (!clientRef.current) { + setAgentSkillsFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + + const agentId = selectedSkillAgentId; + setAgentSkillsSavingKey(`${agentId}:${skillName}`); + setAgentSkillsFeedback(null); + const success = clientRef.current.send({ + type: 'update_agent_skill', + agent_id: agentId, + skill_name: skillName, + enabled + }); + + if (!success) { + setAgentSkillsSavingKey(null); + setAgentSkillsFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [selectedSkillAgentId]); + + const handleSkillAgentChange = useCallback((agentId) => { + setSelectedSkillAgentId(agentId); + requestAgentProfile(agentId); + requestAgentSkills(agentId); + requestWorkspaceFile(agentId, selectedWorkspaceFile); + }, [requestAgentProfile, requestAgentSkills, requestWorkspaceFile, selectedWorkspaceFile]); + + const handleWorkspaceFileChange = useCallback((filename) => { + setSelectedWorkspaceFile(filename); + requestWorkspaceFile(selectedSkillAgentId, filename); + }, [requestWorkspaceFile, selectedSkillAgentId]); + + const handleWorkspaceFileSave = useCallback(() => { + if (!clientRef.current) { + setWorkspaceFileFeedback({ type: 'error', text: '连接未就绪,稍后重试' }); + return; + } + const key = `${selectedSkillAgentId}:${selectedWorkspaceFile}`; + setWorkspaceFileSavingKey(key); + setWorkspaceFileFeedback(null); + const success = clientRef.current.send({ + type: 'update_agent_workspace_file', + agent_id: selectedSkillAgentId, + filename: selectedWorkspaceFile, + content: workspaceDraftContent + }); + if (!success) { + setWorkspaceFileSavingKey(null); + 
setWorkspaceFileFeedback({ type: 'error', text: '发送失败,请检查连接状态' }); + } + }, [selectedSkillAgentId, selectedWorkspaceFile, workspaceDraftContent]); + + useEffect(() => { + setWorkspaceDraftContent(selectedWorkspaceContent); + }, [selectedWorkspaceContent]); + + useEffect(() => { + if (currentView !== 'traders' || !isConnected) { + return; + } + AGENTS.forEach((agent) => { + if (!agentProfilesByAgent[agent.id]) { + requestAgentProfile(agent.id); + } + if (!agentSkillsByAgent[agent.id]) { + requestAgentSkills(agent.id); + } + if (!workspaceFilesByAgent[agent.id]?.['MEMORY.md']) { + requestWorkspaceFile(agent.id, 'MEMORY.md'); + } + }); + }, [agentProfilesByAgent, agentSkillsByAgent, currentView, isConnected, requestAgentProfile, requestAgentSkills, requestWorkspaceFile, workspaceFilesByAgent]); + const requestStockHistory = useCallback((symbol, { force = false } = {}) => { const normalized = typeof symbol === 'string' ? symbol.trim().toUpperCase() : ''; if (!normalized || !clientRef.current) { @@ -604,6 +960,10 @@ export default function LiveTradingApp() { isWatchlistSavingRef.current = isWatchlistSaving; }, [isWatchlistSaving]); + useEffect(() => { + isRuntimeConfigSavingRef.current = isRuntimeConfigSaving; + }, [isRuntimeConfigSaving]); + useEffect(() => { if (currentView !== 'explain' || !selectedExplainSymbol) { return; @@ -670,24 +1030,18 @@ export default function LiveTradingApp() { return () => clearTimeout(timer); }, [holdings, stats, trades, portfolioData.netValue]); - // Initial animation: show room drawer sliding in + // Initial animation flag for slider speed useEffect(() => { - // Wait a bit after mount, then trigger slide to room - const slideTimer = setTimeout(() => { - setCurrentView('room'); - }, 1200); // Wait 1200ms before starting animation (2x slower) - - // Disable animation flag after animation completes const completeTimer = setTimeout(() => { setIsInitialAnimating(false); - }, 5000); // 1200ms delay + 1600ms animation duration + 400ms buffer + 
}, 1800); return () => { - clearTimeout(slideTimer); clearTimeout(completeTimer); }; }, []); + // Helper to check if bubble should still be visible // Bubbles persist until replaced by ANY new message (cross-role) // When any agent sends a new message, all previous bubbles are cleared @@ -769,21 +1123,38 @@ export default function LiveTradingApp() { const handlers = { // Error response (for fast forward errors) error: (e) => { - console.error('[Error]', e.message); + const message = typeof e.message === 'string' ? e.message : '请求失败'; + console.error('[Error]', message); + setIsAgentSkillsLoading(false); + setSkillDetailLoadingKey(null); + setAgentSkillsSavingKey(null); + setIsWorkspaceFileLoading(false); + setWorkspaceFileSavingKey(null); if (isWatchlistSavingRef.current) { setIsWatchlistSaving(false); - setWatchlistFeedback({ type: 'error', text: e.message || '更新 watchlist 失败' }); + setWatchlistFeedback({ type: 'error', text: message || '更新 watchlist 失败' }); + } + if (isRuntimeConfigSavingRef.current) { + setIsRuntimeConfigSaving(false); + setRuntimeConfigFeedback({ type: 'error', text: message }); + } + if (message.includes('skill') || message.includes('agent_id')) { + setAgentSkillsFeedback({ type: 'error', text: message || '更新技能失败' }); + } + if (message.includes('workspace_file') || message.includes('filename')) { + setWorkspaceFileFeedback({ type: 'error', text: message || '更新工作区文件失败' }); } // Handle fast forward errors - if (e.message && e.message.includes('fast forward')) { - console.warn(`⚠️ ${e.message}`); + if (message.includes('fast forward')) { + console.warn(`⚠️ ${message}`); handlePushEvent({ type: 'system', - content: `⚠️ ${e.message}`, + content: `⚠️ ${message}`, timestamp: Date.now() }); } + addSystemMessage(message); }, // Connection events @@ -930,9 +1301,163 @@ export default function LiveTradingApp() { if (isWatchlistSavingRef.current) { setIsWatchlistSaving(false); } + if (isRuntimeConfigSavingRef.current) { + setIsRuntimeConfigSaving(false); 
+ setRuntimeConfigFeedback({ type: 'success', text: '运行配置已保存并生效' }); + } + const warnings = Array.isArray(e.runtime_config_warnings) ? e.runtime_config_warnings : []; + warnings.forEach((warning) => addSystemMessage(warning)); addSystemMessage('运行时配置已热更新'); }, + agent_skills_loaded: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + if (!agentId) { + setIsAgentSkillsLoading(false); + return; + } + setAgentSkillsByAgent((prev) => ({ + ...prev, + [agentId]: Array.isArray(e.skills) ? e.skills : [] + })); + setIsAgentSkillsLoading(false); + setAgentSkillsSavingKey(null); + }, + + agent_profile_loaded: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + if (!agentId) { + return; + } + setAgentProfilesByAgent((prev) => ({ + ...prev, + [agentId]: e.profile && typeof e.profile === 'object' ? e.profile : {} + })); + }, + + skill_detail_loaded: (e) => { + const skillName = typeof e.skill?.skill_name === 'string' ? e.skill.skill_name.trim() : ''; + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : selectedSkillAgentId; + if (!skillName) { + setSkillDetailLoadingKey(null); + return; + } + const detailKey = `${agentId}:${skillName}`; + setSkillDetailsByName((prev) => ({ + ...prev, + [detailKey]: e.skill + })); + setLocalSkillDraftsByKey((prev) => ({ + ...prev, + [detailKey]: typeof e.skill?.content === 'string' ? e.skill.content : '' + })); + setSkillDetailLoadingKey(null); + }, + + agent_skill_updated: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + const skillName = typeof e.skill_name === 'string' ? e.skill_name.trim() : ''; + if (!agentId || !skillName) { + return; + } + setAgentSkillsFeedback({ + type: 'success', + text: `${agentId} ${e.enabled ? '已启用' : '已禁用'} ${skillName}` + }); + }, + + agent_local_skill_created: (e) => { + const agentId = typeof e.agent_id === 'string' ? 
e.agent_id.trim() : ''; + const skillName = typeof e.skill_name === 'string' ? e.skill_name.trim() : ''; + setAgentSkillsSavingKey(null); + if (!agentId || !skillName) { + return; + } + setAgentSkillsFeedback({ + type: 'success', + text: `${agentId} 已创建本地技能 ${skillName}` + }); + }, + + agent_local_skill_updated: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + const skillName = typeof e.skill_name === 'string' ? e.skill_name.trim() : ''; + setAgentSkillsSavingKey(null); + if (!agentId || !skillName) { + return; + } + setAgentSkillsFeedback({ + type: 'success', + text: `${agentId} 的本地技能 ${skillName} 已保存` + }); + }, + + agent_local_skill_deleted: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + const skillName = typeof e.skill_name === 'string' ? e.skill_name.trim() : ''; + setAgentSkillsSavingKey(null); + if (!agentId || !skillName) { + return; + } + setSkillDetailsByName((prev) => { + const next = { ...prev }; + delete next[`${agentId}:${skillName}`]; + return next; + }); + setLocalSkillDraftsByKey((prev) => { + const next = { ...prev }; + delete next[`${agentId}:${skillName}`]; + return next; + }); + setAgentSkillsFeedback({ + type: 'success', + text: `${agentId} 的本地技能 ${skillName} 已删除` + }); + }, + + agent_skill_removed: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + const skillName = typeof e.skill_name === 'string' ? e.skill_name.trim() : ''; + setAgentSkillsSavingKey(null); + if (!agentId || !skillName) { + return; + } + setAgentSkillsFeedback({ + type: 'success', + text: `${agentId} 已移除共享技能 ${skillName}` + }); + }, + + agent_workspace_file_loaded: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + const filename = typeof e.filename === 'string' ? 
e.filename.trim() : ''; + if (!agentId || !filename) { + setIsWorkspaceFileLoading(false); + return; + } + setWorkspaceFilesByAgent((prev) => ({ + ...prev, + [agentId]: { + ...(prev[agentId] || {}), + [filename]: typeof e.content === 'string' ? e.content : '' + } + })); + setIsWorkspaceFileLoading(false); + setWorkspaceFileSavingKey(null); + }, + + agent_workspace_file_updated: (e) => { + const agentId = typeof e.agent_id === 'string' ? e.agent_id.trim() : ''; + const filename = typeof e.filename === 'string' ? e.filename.trim() : ''; + if (!agentId || !filename) { + return; + } + setWorkspaceFileFeedback({ + type: 'success', + text: `${agentId} 的 ${filename} 已保存` + }); + }, + watchlist_updated: (e) => { + if (Array.isArray(e.tickers)) { + const normalizedTickers = e.tickers.map((symbol) => String(symbol).trim().toUpperCase()); @@ -1713,10 +2238,41 @@ export default function LiveTradingApp() { )} + {runtimeSummaryLabel && ( + <> + · + + {runtimeSummaryLabel} + + + )} · {now.toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit', second: '2-digit', hour12: false })} + {serverMode !== 'backtest' && ( + + )} + + + setIsRuntimeSettingsOpen(false)} + onScheduleModeChange={setScheduleModeDraft} + onIntervalMinutesChange={setIntervalMinutesDraft} + onTriggerTimeChange={setTriggerTimeDraft} + onMaxCommCyclesChange={setMaxCommCyclesDraft} + onSave={handleRuntimeConfigSave} + onRestoreDefaults={handleRuntimeDefaultsRestore} + /> @@ -1783,6 +2359,13 @@ export default function LiveTradingApp() {
+ +
- {/* Slider container with four views */} -
+
+ }> + + +
+ {/* Room View Panel */}
}> diff --git a/frontend/src/components/RuntimeSettingsPanel.jsx b/frontend/src/components/RuntimeSettingsPanel.jsx new file mode 100644 index 0000000..f5e2fdb --- /dev/null +++ b/frontend/src/components/RuntimeSettingsPanel.jsx @@ -0,0 +1,247 @@ +import React from 'react'; + +export default function RuntimeSettingsPanel({ + isOpen, + isConnected, + isSaving, + feedback, + runtimeConfig, + scheduleMode, + intervalMinutes, + triggerTime, + maxCommCycles, + onToggle, + onClose, + onScheduleModeChange, + onIntervalMinutesChange, + onTriggerTimeChange, + onMaxCommCyclesChange, + onSave, + onRestoreDefaults +}) { + return ( +
+ + + {isOpen && ( +
+
+
+
+ 运行设置 +
+
+ 保存后立即热更新当前运行中的调度参数 +
+
+ +
+ +
+ + + +
+ + + + + +
+ + +
+ + {feedback && ( + + {feedback.text} + + )} + + {runtimeConfig && ( +
+
+
+ 当前生效配置 +
+
+ 这里显示当前 run 已加载并生效的参数 +
+
+ +
+
tickers: {(runtimeConfig.tickers || []).join(', ') || '-'}
+
schedule_mode: {runtimeConfig.schedule_mode || '-'}
+
interval_minutes: {runtimeConfig.interval_minutes ?? '-'}
+
trigger_time: {runtimeConfig.trigger_time || '-'}
+
max_comm_cycles: {runtimeConfig.max_comm_cycles ?? '-'}
+
initial_cash: {runtimeConfig.initial_cash ?? '-'}
+
margin_requirement: {runtimeConfig.margin_requirement ?? '-'}
+
enable_memory: {String(runtimeConfig.enable_memory ?? false)}
+
+
+ )} +
+ )} +
+ ); +} diff --git a/frontend/src/components/TraderView.jsx b/frontend/src/components/TraderView.jsx new file mode 100644 index 0000000..6acf744 --- /dev/null +++ b/frontend/src/components/TraderView.jsx @@ -0,0 +1,765 @@ +import React, { useEffect, useMemo, useState } from 'react'; +import { createPortal } from 'react-dom'; +import { getModelIcon, getShortModelName } from '../utils/modelIcons'; + +export default function TraderView({ + agents, + agentProfilesByAgent, + agentSkillsByAgent, + workspaceFilesByAgent, + selectedAgentId, + selectedAgentProfile, + selectedAgentSkills, + skillDetailsByName, + localSkillDraftsByKey, + skillDetailLoadingKey, + editableFiles, + selectedWorkspaceFile, + workspaceFileContent, + workspaceDraftContent, + isConnected, + isAgentSkillsLoading, + agentSkillsSavingKey, + agentSkillsFeedback, + isWorkspaceFileLoading, + workspaceFileSavingKey, + workspaceFileFeedback, + onAgentChange, + onCreateLocalSkill, + onSkillDetailRequest, + onLocalSkillDraftChange, + onLocalSkillDelete, + onLocalSkillSave, + onRemoveSharedSkill, + onSkillToggle, + onWorkspaceFileChange, + onWorkspaceDraftChange, + onWorkspaceFileSave +}) { + const [expandedSkillKey, setExpandedSkillKey] = useState(null); + const [newLocalSkillName, setNewLocalSkillName] = useState(''); + const [isSkillPickerOpen, setIsSkillPickerOpen] = useState(false); + + const selectedAgent = useMemo( + () => agents.find((agent) => agent.id === selectedAgentId) || agents[0] || null, + [agents, selectedAgentId] + ); + + useEffect(() => { + setExpandedSkillKey(null); + }, [selectedAgentId]); + + if (!selectedAgent) { + return null; + } + + const profile = selectedAgentProfile || {}; + const modelInfo = getModelIcon(profile.model_name, profile.model_provider); + const activeSkills = selectedAgentSkills.filter((item) => item.status === 'enabled' || item.status === 'active'); + const installedSkills = selectedAgentSkills.filter((item) => item.status !== 'available'); + const availableSkills = 
selectedAgentSkills.filter((item) => item.status === 'available'); + + return ( +
+
+
+ 交易员档案 +
+
+ 聚焦查看每个 Agent 的模型、工具组、技能编排和工作区记忆,不展示交易表现数据 +
+
+
+
+ {agents.map((agent) => { + const isSelected = agent.id === selectedAgentId; + return ( + + ); + })} +
+ +
+
+
+ {selectedAgent.name} +
+
{selectedAgent.name}
+
{selectedAgent.role}
+
+ 当前档案已展开 +
+
+
+ +
+ {modelInfo.logoPath && ( + {modelInfo.provider} + )} +
+
模型
+
+ {getShortModelName(profile.model_name)} +
+
+
+
+ +
+
+
+
+
+
技能
+
+ 已启用: {activeSkills.length} / 已安装: {installedSkills.length} +
+
+
+ +
+
+ +
+ {isAgentSkillsLoading ? ( +
加载技能中...
+ ) : installedSkills.length === 0 ? ( +
暂无技能
+ ) : installedSkills.map((skill) => { + const isEnabled = skill.status === 'enabled' || skill.status === 'active'; + const saving = agentSkillsSavingKey === `${selectedAgentId}:${skill.skill_name}` || agentSkillsSavingKey === `${selectedAgentId}:${skill.skill_name}:content` || agentSkillsSavingKey === `${selectedAgentId}:${skill.skill_name}:delete` || agentSkillsSavingKey === `${selectedAgentId}:${skill.skill_name}:remove`; + const isExpanded = expandedSkillKey === skill.skill_name; + const detailKey = `${selectedAgentId}:${skill.skill_name}`; + const skillDetail = skillDetailsByName?.[detailKey] || null; + const skillDraft = localSkillDraftsByKey?.[detailKey] ?? ''; + const isDetailLoading = skillDetailLoadingKey === detailKey; + const isLocalSkill = skill.source === 'local'; + return ( +
+
+ +
+ + {isLocalSkill ? ( + + ) : ( + + )} +
+
+ + {isExpanded && ( +
+
+ {isDetailLoading + ? '加载技能说明中...' + : (skillDetail?.content || '暂无更详细的技能说明')} +
+ {isLocalSkill && !isDetailLoading && ( +
+
+ 本地技能 SKILL.md +
+