Add per-agent skill workspaces and TraderView management
This commit is contained in:
75
backend/agents/agent_workspace.py
Normal file
75
backend/agents/agent_workspace.py
Normal file
@@ -0,0 +1,75 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Per-agent run-scoped workspace configuration helpers."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class AgentWorkspaceConfig:
    """Structured agent config loaded from runs/<config>/agents/<agent>/agent.yaml.

    The raw mapping is kept untouched in ``values``; the typed accessors below
    apply forgiving validation so a hand-edited agent.yaml never crashes callers.
    """

    # Raw key/value mapping parsed from agent.yaml (empty when the file is absent).
    values: Dict[str, Any] = field(default_factory=dict)

    @property
    def prompt_files(self) -> Optional[List[str]]:
        """Ordered prompt file names, or None when unset, malformed, or empty."""
        raw = self.values.get("prompt_files")
        if not isinstance(raw, list):
            return None
        # Keep only non-blank string entries; order (and duplicates) preserved.
        # Items are already str per the isinstance check, so no str() coercion.
        files = [item.strip() for item in raw if isinstance(item, str) and item.strip()]
        return files or None

    @property
    def enabled_skills(self) -> List[str]:
        """Skills explicitly enabled for this agent (stripped, deduped, ordered)."""
        return _normalized_string_list(self.values.get("enabled_skills"))

    @property
    def disabled_skills(self) -> List[str]:
        """Skills explicitly disabled for this agent (stripped, deduped, ordered)."""
        return _normalized_string_list(self.values.get("disabled_skills"))

    @property
    def active_tool_groups(self) -> Optional[List[str]]:
        """Tool groups to activate, or None so callers fall back to their defaults."""
        groups = _normalized_string_list(self.values.get("active_tool_groups"))
        return groups or None

    @property
    def disabled_tool_groups(self) -> List[str]:
        """Tool groups to strip from the active set."""
        return _normalized_string_list(self.values.get("disabled_tool_groups"))

    def get(self, key: str, default: Any = None) -> Any:
        """Raw access to any agent.yaml value."""
        return self.values.get(key, default)
|
||||
|
||||
|
||||
def _normalized_string_list(raw: Any) -> List[str]:
|
||||
if not isinstance(raw, list):
|
||||
return []
|
||||
seen: List[str] = []
|
||||
for item in raw:
|
||||
if not isinstance(item, str):
|
||||
continue
|
||||
value = item.strip()
|
||||
if value and value not in seen:
|
||||
seen.append(value)
|
||||
return seen
|
||||
|
||||
|
||||
def load_agent_workspace_config(path: Path) -> AgentWorkspaceConfig:
    """Load agent.yaml if present.

    Loading is best-effort by design: a missing, empty, unparsable, or
    non-mapping file yields an empty :class:`AgentWorkspaceConfig` instead of
    raising, matching the forgiving parsing in ``parse_skill_metadata``.
    """
    if not path.exists() or not path.is_file():
        return AgentWorkspaceConfig()

    raw = path.read_text(encoding="utf-8").strip()
    if not raw:
        return AgentWorkspaceConfig()

    try:
        parsed = yaml.safe_load(raw) or {}
    except yaml.YAMLError:
        # A hand-edited, syntactically broken agent.yaml must not abort the run.
        parsed = {}
    if not isinstance(parsed, dict):
        parsed = {}
    return AgentWorkspaceConfig(values=parsed)
|
||||
@@ -4,6 +4,7 @@
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
from .agent_workspace import load_agent_workspace_config
|
||||
from backend.config.bootstrap_config import get_bootstrap_config_for_run
|
||||
from .prompt_loader import PromptLoader
|
||||
from .skills_manager import SkillsManager
|
||||
@@ -23,6 +24,26 @@ def _append_section(parts: list[str], title: str, content: str) -> None:
|
||||
parts.append(f"## {title}\n{content}")
|
||||
|
||||
|
||||
def _build_skill_metadata_summary(skills_manager: SkillsManager, config_name: str, agent_id: str) -> str:
    """Create a compact summary of active skills for prompt routing."""
    metadata_items = skills_manager.list_active_skill_metadata(config_name, agent_id)
    if not metadata_items:
        return ""

    # One header line, then one " | "-joined line per active skill.
    summary_lines: list[str] = [
        "You can use the following active skills. Prefer the most relevant one, then read its SKILL.md if needed for detailed workflow:",
    ]
    for metadata in metadata_items:
        fragments = [f"- `{metadata.skill_name}`"]
        if metadata.description:
            fragments.append(metadata.description)
        if metadata.version:
            fragments.append(f"version: {metadata.version}")
        fragments.append(f"path: {metadata.path}")
        summary_lines.append(" | ".join(fragments))
    return "\n".join(summary_lines)
|
||||
|
||||
|
||||
def build_agent_system_prompt(
|
||||
agent_id: str,
|
||||
config_name: str,
|
||||
@@ -31,6 +52,13 @@ def build_agent_system_prompt(
|
||||
) -> str:
|
||||
"""Build the final system prompt for an agent."""
|
||||
sections: list[str] = []
|
||||
canonical_agent_id = (
|
||||
"portfolio_manager"
|
||||
if "portfolio" in agent_id
|
||||
else "risk_manager"
|
||||
if "risk" in agent_id and not analyst_type
|
||||
else agent_id
|
||||
)
|
||||
|
||||
if analyst_type:
|
||||
personas_config = _prompt_loader.load_yaml_config(
|
||||
@@ -56,11 +84,21 @@ def build_agent_system_prompt(
|
||||
"portfolio_manager",
|
||||
"system",
|
||||
)
|
||||
elif canonical_agent_id == "portfolio_manager":
|
||||
base_prompt = _prompt_loader.load_prompt(
|
||||
"portfolio_manager",
|
||||
"system",
|
||||
)
|
||||
elif agent_id == "risk_manager":
|
||||
base_prompt = _prompt_loader.load_prompt(
|
||||
"risk_manager",
|
||||
"system",
|
||||
)
|
||||
elif canonical_agent_id == "risk_manager":
|
||||
base_prompt = _prompt_loader.load_prompt(
|
||||
"risk_manager",
|
||||
"system",
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unsupported agent prompt build for: {agent_id}")
|
||||
|
||||
@@ -69,6 +107,7 @@ def build_agent_system_prompt(
|
||||
skills_manager = SkillsManager()
|
||||
asset_dir = skills_manager.get_agent_asset_dir(config_name, agent_id)
|
||||
asset_dir.mkdir(parents=True, exist_ok=True)
|
||||
agent_config = load_agent_workspace_config(asset_dir / "agent.yaml")
|
||||
bootstrap_config = get_bootstrap_config_for_run(
|
||||
skills_manager.project_root,
|
||||
config_name,
|
||||
@@ -80,26 +119,62 @@ def build_agent_system_prompt(
|
||||
bootstrap_config.prompt_body,
|
||||
)
|
||||
|
||||
_append_section(
|
||||
sections,
|
||||
"Role",
|
||||
_read_file_if_exists(asset_dir / "ROLE.md"),
|
||||
)
|
||||
_append_section(
|
||||
sections,
|
||||
"Style",
|
||||
_read_file_if_exists(asset_dir / "STYLE.md"),
|
||||
)
|
||||
_append_section(
|
||||
sections,
|
||||
"Policy",
|
||||
_read_file_if_exists(asset_dir / "POLICY.md"),
|
||||
)
|
||||
prompt_files = agent_config.prompt_files or [
|
||||
"SOUL.md",
|
||||
"PROFILE.md",
|
||||
"AGENTS.md",
|
||||
"POLICY.md",
|
||||
"MEMORY.md",
|
||||
]
|
||||
included_files = set(prompt_files)
|
||||
title_map = {
|
||||
"SOUL.md": "Soul",
|
||||
"PROFILE.md": "Profile",
|
||||
"AGENTS.md": "Agent Guide",
|
||||
"POLICY.md": "Policy",
|
||||
"MEMORY.md": "Memory",
|
||||
"HEARTBEAT.md": "Heartbeat",
|
||||
"ROLE.md": "Role",
|
||||
"STYLE.md": "Style",
|
||||
}
|
||||
for filename in prompt_files:
|
||||
_append_section(
|
||||
sections,
|
||||
title_map.get(filename, filename),
|
||||
_read_file_if_exists(asset_dir / filename),
|
||||
)
|
||||
|
||||
if "ROLE.md" not in included_files:
|
||||
_append_section(
|
||||
sections,
|
||||
"Role",
|
||||
_read_file_if_exists(asset_dir / "ROLE.md"),
|
||||
)
|
||||
if "STYLE.md" not in included_files:
|
||||
_append_section(
|
||||
sections,
|
||||
"Style",
|
||||
_read_file_if_exists(asset_dir / "STYLE.md"),
|
||||
)
|
||||
if "POLICY.md" not in included_files:
|
||||
_append_section(
|
||||
sections,
|
||||
"Policy",
|
||||
_read_file_if_exists(asset_dir / "POLICY.md"),
|
||||
)
|
||||
|
||||
skill_prompt = toolkit.get_agent_skill_prompt()
|
||||
if skill_prompt:
|
||||
_append_section(sections, "Skills", str(skill_prompt))
|
||||
|
||||
metadata_summary = _build_skill_metadata_summary(
|
||||
skills_manager=skills_manager,
|
||||
config_name=config_name,
|
||||
agent_id=agent_id,
|
||||
)
|
||||
if metadata_summary:
|
||||
_append_section(sections, "Active Skill Catalog", metadata_summary)
|
||||
|
||||
activated_notes = toolkit.get_activated_notes()
|
||||
if activated_notes:
|
||||
_append_section(sections, "Tool Usage Notes", str(activated_notes))
|
||||
|
||||
79
backend/agents/skill_metadata.py
Normal file
79
backend/agents/skill_metadata.py
Normal file
@@ -0,0 +1,79 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Skill metadata parsing helpers for SKILL.md files."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class SkillMetadata:
    """Parsed metadata for a skill package."""

    # Directory name of the skill package; stable identifier used as dict key.
    skill_name: str
    # Path to the skill directory the metadata was parsed from.
    path: Path
    # Registry the skill came from, e.g. "builtin", "customized", "active", "local".
    source: str
    # Display name from SKILL.md frontmatter; falls back to skill_name.
    name: str
    # Description from frontmatter, or the first body line when absent.
    description: str
    # Version string from frontmatter; "" when not declared.
    version: str = ""
    # Tool names declared by the skill (stripped, deduped).
    tools: List[str] = field(default_factory=list)
|
||||
|
||||
|
||||
def parse_skill_metadata(skill_dir: Path, source: str) -> SkillMetadata:
    """Parse SKILL.md frontmatter with a forgiving schema."""
    skill_name = skill_dir.name
    skill_file = skill_dir / "SKILL.md"

    # A missing SKILL.md behaves exactly like an empty one: no frontmatter,
    # no body, so every field below falls back to its default.
    frontmatter: dict = {}
    body = ""
    if skill_file.exists():
        body = skill_file.read_text(encoding="utf-8").strip()
        if body.startswith("---"):
            segments = body.split("---", 2)
            if len(segments) >= 3:
                try:
                    loaded = yaml.safe_load(segments[1].strip()) or {}
                except yaml.YAMLError:
                    loaded = {}
                # Non-mapping frontmatter (e.g. a list) is discarded.
                if isinstance(loaded, dict):
                    frontmatter = loaded
                body = segments[2].strip()

    description = str(frontmatter.get("description") or "").strip()
    if not description and body:
        # Fall back to the first body line, shorn of markdown heading markers.
        description = body.splitlines()[0].strip().lstrip("#").strip()

    return SkillMetadata(
        skill_name=skill_name,
        path=skill_dir,
        source=source,
        name=str(frontmatter.get("name") or skill_name).strip() or skill_name,
        description=description,
        version=str(frontmatter.get("version") or "").strip(),
        tools=_string_list(frontmatter.get("tools")),
    )
|
||||
|
||||
|
||||
def _string_list(value) -> List[str]:
|
||||
if isinstance(value, str):
|
||||
item = value.strip()
|
||||
return [item] if item else []
|
||||
if not isinstance(value, list):
|
||||
return []
|
||||
seen: List[str] = []
|
||||
for item in value:
|
||||
if not isinstance(item, str):
|
||||
continue
|
||||
normalized = item.strip()
|
||||
if normalized and normalized not in seen:
|
||||
seen.append(normalized)
|
||||
return seen
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Manage builtin/customized/active skill directories for each run."""
|
||||
"""Manage agent-installed and run-active skill directories for each run."""
|
||||
|
||||
from pathlib import Path
|
||||
import shutil
|
||||
@@ -7,6 +7,8 @@ from typing import Dict, Iterable, List
|
||||
|
||||
import yaml
|
||||
|
||||
from backend.agents.agent_workspace import load_agent_workspace_config
|
||||
from backend.agents.skill_metadata import SkillMetadata, parse_skill_metadata
|
||||
from backend.config.bootstrap_config import get_bootstrap_config_for_run
|
||||
|
||||
|
||||
@@ -26,12 +28,283 @@ class SkillsManager:
|
||||
def get_active_root(self, config_name: str) -> Path:
    """Run-level shared directory of active skills (legacy/global union)."""
    return self.runs_root / config_name / "skills" / "active"

def get_agent_skills_root(self, config_name: str, agent_id: str) -> Path:
    """Root of one agent's private skills workspace."""
    return self.get_agent_asset_dir(config_name, agent_id) / "skills"

def get_agent_active_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-private directory of currently active skills."""
    return self.get_agent_skills_root(config_name, agent_id) / "active"

def get_agent_installed_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-private copies of shared skills installed for this agent."""
    return self.get_agent_skills_root(config_name, agent_id) / "installed"

def get_agent_disabled_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-private parking area for explicitly disabled skills."""
    return self.get_agent_skills_root(config_name, agent_id) / "disabled"

def get_agent_local_root(self, config_name: str, agent_id: str) -> Path:
    """Agent-private skills authored for this agent only."""
    return self.get_agent_skills_root(config_name, agent_id) / "local"

def get_activation_manifest_path(self, config_name: str) -> Path:
    """Path of the run-level skills activation.yaml manifest."""
    return self.runs_root / config_name / "skills" / "activation.yaml"

def get_agent_asset_dir(self, config_name: str, agent_id: str) -> Path:
    """Workspace directory runs/<config>/agents/<agent_id> for one agent."""
    return self.runs_root / config_name / "agents" / agent_id
|
||||
|
||||
def list_skill_catalog(self) -> List[SkillMetadata]:
    """Return builtin/customized skills with parsed metadata."""
    # Later sources overwrite earlier ones, so a customized skill with the
    # same name shadows its builtin counterpart.
    catalog: Dict[str, SkillMetadata] = {}
    registries = (
        ("builtin", self.builtin_root),
        ("customized", self.customized_root),
    )
    for source, root in registries:
        if not root.exists():
            continue
        for candidate in sorted(root.iterdir(), key=lambda entry: entry.name):
            if candidate.is_dir() and (candidate / "SKILL.md").exists():
                parsed = parse_skill_metadata(candidate, source=source)
                catalog[parsed.skill_name] = parsed

    return sorted(catalog.values(), key=lambda entry: entry.skill_name)
|
||||
|
||||
def list_agent_skill_catalog(
    self,
    config_name: str,
    agent_id: str,
) -> List[SkillMetadata]:
    """Return shared plus agent-local skills for one agent."""
    merged: Dict[str, SkillMetadata] = {}
    for shared in self.list_skill_catalog():
        merged[shared.skill_name] = shared
    # Agent-local skills shadow shared skills with the same name.
    for local in self.list_agent_local_skills(config_name, agent_id):
        merged[local.skill_name] = local
    return sorted(merged.values(), key=lambda entry: entry.skill_name)
|
||||
|
||||
def list_active_skill_metadata(
    self,
    config_name: str,
    agent_id: str,
) -> List[SkillMetadata]:
    """Return metadata for active skills synced for one agent."""
    active_root = self.get_agent_active_root(config_name, agent_id)
    if not active_root.exists():
        return []
    # Only directories carrying a SKILL.md count as skill packages.
    return [
        parse_skill_metadata(entry, source="active")
        for entry in sorted(active_root.iterdir(), key=lambda entry: entry.name)
        if entry.is_dir() and (entry / "SKILL.md").exists()
    ]
|
||||
|
||||
def list_agent_local_skills(
    self,
    config_name: str,
    agent_id: str,
) -> List[SkillMetadata]:
    """Return metadata for agent-private local skills."""
    local_root = self.get_agent_local_root(config_name, agent_id)
    if not local_root.exists():
        return []
    # Only directories carrying a SKILL.md count as skill packages.
    return [
        parse_skill_metadata(entry, source="local")
        for entry in sorted(local_root.iterdir(), key=lambda entry: entry.name)
        if entry.is_dir() and (entry / "SKILL.md").exists()
    ]
|
||||
|
||||
def load_skill_document(self, skill_name: str) -> Dict[str, object]:
    """Return skill metadata plus markdown body for one skill."""
    source_dir = self._resolve_source_dir(skill_name)
    # _resolve_source_dir only yields dirs under builtin_root or customized_root.
    is_customized = source_dir.parent == self.customized_root
    return self._load_skill_document_from_dir(
        source_dir,
        source="customized" if is_customized else "builtin",
    )
|
||||
|
||||
def load_agent_skill_document(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> Dict[str, object]:
    """Return skill metadata plus markdown body for one agent-visible skill."""
    source_dir = self._resolve_agent_skill_source_dir(
        config_name=config_name,
        agent_id=agent_id,
        skill_name=skill_name,
    )
    # Classify by the parent registry; anything unrecognized is agent-local.
    parent = source_dir.parent
    if parent == self.customized_root:
        source = "customized"
    elif parent == self.builtin_root:
        source = "builtin"
    elif parent == self.get_agent_installed_root(config_name, agent_id):
        source = "installed"
    else:
        source = "local"
    return self._load_skill_document_from_dir(source_dir, source=source)
|
||||
|
||||
def create_agent_local_skill(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> Path:
    """Create a new local skill directory with a default SKILL.md."""
    normalized = _normalize_skill_name(skill_name)
    if not normalized:
        raise ValueError("Skill name is required.")

    local_root = self.get_agent_local_root(config_name, agent_id)
    local_root.mkdir(parents=True, exist_ok=True)

    skill_dir = local_root / normalized
    if skill_dir.exists():
        raise FileExistsError(f"Local skill already exists: {normalized}")
    skill_dir.mkdir(parents=True, exist_ok=False)

    # Seed a minimal SKILL.md with YAML frontmatter the parser understands.
    template = (
        "---\n"
        f"name: {normalized}\n"
        "description: 当用户提出与该本地技能相关的专门任务时,应使用此技能。\n"
        "version: 1.0.0\n"
        "---\n\n"
        f"# {normalized}\n\n"
        "在这里描述该交易员的专有分析流程、判断框架和可复用步骤。\n"
    )
    (skill_dir / "SKILL.md").write_text(template, encoding="utf-8")
    return skill_dir
|
||||
|
||||
def update_agent_local_skill(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
    content: str,
) -> Path:
    """Overwrite one agent-local SKILL.md."""
    normalized = _normalize_skill_name(skill_name)
    if not normalized:
        raise ValueError("Skill name is required.")
    target_dir = self.get_agent_local_root(config_name, agent_id) / normalized
    if not target_dir.exists():
        raise FileNotFoundError(f"Unknown local skill: {normalized}")
    (target_dir / "SKILL.md").write_text(content, encoding="utf-8")
    return target_dir
|
||||
|
||||
def delete_agent_local_skill(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> None:
    """Delete one agent-local skill directory."""
    normalized = _normalize_skill_name(skill_name)
    if not normalized:
        raise ValueError("Skill name is required.")
    target_dir = self.get_agent_local_root(config_name, agent_id) / normalized
    if not target_dir.exists():
        raise FileNotFoundError(f"Unknown local skill: {normalized}")
    # Remove the whole package (SKILL.md plus any bundled assets).
    shutil.rmtree(target_dir)
|
||||
|
||||
def _load_skill_document_from_dir(
    self,
    source_dir: Path,
    *,
    source: str,
) -> Dict[str, object]:
    """Return metadata plus markdown body for one resolved skill directory."""
    metadata = parse_skill_metadata(source_dir, source=source)

    skill_file = source_dir / "SKILL.md"
    body = skill_file.read_text(encoding="utf-8").strip() if skill_file.exists() else ""
    # Strip the YAML frontmatter block so only the markdown body remains.
    if body.startswith("---"):
        segments = body.split("---", 2)
        if len(segments) >= 3:
            body = segments[2].strip()

    return {
        "skill_name": metadata.skill_name,
        "name": metadata.name,
        "description": metadata.description,
        "version": metadata.version,
        "tools": metadata.tools,
        "source": metadata.source,
        "content": body,
    }
|
||||
|
||||
def update_agent_skill_overrides(
    self,
    config_name: str,
    agent_id: str,
    *,
    enable: Iterable[str] | None = None,
    disable: Iterable[str] | None = None,
) -> Dict[str, List[str]]:
    """Persist per-agent enabled/disabled skill overrides in agent.yaml.

    ``enable`` entries are appended once and removed from the disabled set;
    ``disable`` entries are applied afterwards, so a skill named in both
    ends up disabled. Returns the persisted override lists.
    """
    asset_dir = self.get_agent_asset_dir(config_name, agent_id)
    asset_dir.mkdir(parents=True, exist_ok=True)
    config_path = asset_dir / "agent.yaml"
    current = load_agent_workspace_config(config_path)
    values = dict(current.values)

    enabled = _dedupe_preserve_order(current.enabled_skills)
    disabled_set = set(current.disabled_skills)

    for skill_name in enable or []:
        if skill_name not in enabled:
            enabled.append(skill_name)
        disabled_set.discard(skill_name)

    # Apply all disables in a single pass instead of rebuilding `enabled`
    # once per disabled skill (was O(n*m)).
    to_disable = set(disable or [])
    if to_disable:
        disabled_set.update(to_disable)
        enabled = [item for item in enabled if item not in to_disable]

    values["enabled_skills"] = enabled
    values["disabled_skills"] = sorted(disabled_set)
    config_path.write_text(
        yaml.safe_dump(values, allow_unicode=True, sort_keys=False),
        encoding="utf-8",
    )
    return {
        "enabled_skills": enabled,
        "disabled_skills": sorted(disabled_set),
    }
|
||||
|
||||
def forget_agent_skill_overrides(
    self,
    config_name: str,
    agent_id: str,
    skill_names: Iterable[str],
) -> Dict[str, List[str]]:
    """Remove skills from both enabled/disabled overrides in agent.yaml."""
    asset_dir = self.get_agent_asset_dir(config_name, agent_id)
    asset_dir.mkdir(parents=True, exist_ok=True)
    config_path = asset_dir / "agent.yaml"
    current = load_agent_workspace_config(config_path)

    to_forget = set(skill_names)
    kept_enabled = [name for name in current.enabled_skills if name not in to_forget]
    kept_disabled = [name for name in current.disabled_skills if name not in to_forget]

    values = dict(current.values)
    values["enabled_skills"] = kept_enabled
    values["disabled_skills"] = kept_disabled
    config_path.write_text(
        yaml.safe_dump(values, allow_unicode=True, sort_keys=False),
        encoding="utf-8",
    )
    return {
        "enabled_skills": kept_enabled,
        "disabled_skills": kept_disabled,
    }
|
||||
|
||||
def ensure_activation_manifest(self, config_name: str) -> Path:
|
||||
manifest_path = self.get_activation_manifest_path(config_name)
|
||||
manifest_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
@@ -62,6 +335,34 @@ class SkillsManager:
|
||||
|
||||
raise FileNotFoundError(f"Unknown skill: {skill_name}")
|
||||
|
||||
def _resolve_agent_skill_source_dir(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> Path:
    """Resolve one skill from the agent-local workspace or shared registry."""
    # Agent-local skills win over installed copies; both win over shared ones.
    agent_roots = (
        self.get_agent_local_root(config_name, agent_id),
        self.get_agent_installed_root(config_name, agent_id),
    )
    for root in agent_roots:
        candidate = root / skill_name
        if candidate.exists() and (candidate / "SKILL.md").exists():
            return candidate
    # Fall back to builtin/customized; raises FileNotFoundError when unknown.
    return self._resolve_source_dir(skill_name)
|
||||
|
||||
def _skill_exists_for_agent(
    self,
    config_name: str,
    agent_id: str,
    skill_name: str,
) -> bool:
    """True when the skill resolves for this agent (local, installed, or shared)."""
    try:
        self._resolve_agent_skill_source_dir(config_name, agent_id, skill_name)
        return True
    except FileNotFoundError:
        return False
|
||||
|
||||
def _persist_runtime_edits(
|
||||
self,
|
||||
config_name: str,
|
||||
@@ -125,6 +426,13 @@ class SkillsManager:
|
||||
bootstrap = get_bootstrap_config_for_run(self.project_root, config_name)
|
||||
override = bootstrap.agent_override(agent_id)
|
||||
skills = list(override.get("skills", list(default_skills)))
|
||||
agent_config = load_agent_workspace_config(
|
||||
self.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
|
||||
)
|
||||
|
||||
for skill_name in agent_config.enabled_skills:
|
||||
if skill_name not in skills:
|
||||
skills.append(skill_name)
|
||||
|
||||
manifest = self.load_activation_manifest(config_name)
|
||||
for skill_name in manifest.get("global_enabled_skills", []):
|
||||
@@ -139,51 +447,61 @@ class SkillsManager:
|
||||
disabled.update(
|
||||
manifest.get("agent_disabled_skills", {}).get(agent_id, []),
|
||||
)
|
||||
disabled.update(agent_config.disabled_skills)
|
||||
|
||||
return [skill for skill in skills if skill not in disabled]
|
||||
for item in self.list_agent_local_skills(config_name, agent_id):
|
||||
if item.skill_name not in skills:
|
||||
skills.append(item.skill_name)
|
||||
|
||||
def sync_active_skills(
|
||||
return [
|
||||
skill
|
||||
for skill in skills
|
||||
if skill not in disabled
|
||||
and self._skill_exists_for_agent(config_name, agent_id, skill)
|
||||
]
|
||||
|
||||
def sync_skill_dirs(
|
||||
self,
|
||||
config_name: str,
|
||||
skill_names: Iterable[str],
|
||||
target_root: Path,
|
||||
skill_sources: Dict[str, Path],
|
||||
) -> List[Path]:
|
||||
"""Sync selected skills into the run workspace and return their paths."""
|
||||
active_root = self.get_active_root(config_name)
|
||||
active_root.mkdir(parents=True, exist_ok=True)
|
||||
"""Sync selected skill directories into one target root."""
|
||||
target_root.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
synced_paths: List[Path] = []
|
||||
wanted = set(skill_names)
|
||||
wanted = set(skill_sources)
|
||||
|
||||
for existing in active_root.iterdir():
|
||||
for existing in target_root.iterdir():
|
||||
if existing.is_dir() and existing.name not in wanted:
|
||||
self._persist_runtime_edits(
|
||||
config_name=config_name,
|
||||
skill_name=existing.name,
|
||||
active_dir=existing,
|
||||
)
|
||||
shutil.rmtree(existing)
|
||||
|
||||
for skill_name in skill_names:
|
||||
source_dir = self._resolve_source_dir(skill_name)
|
||||
target_dir = active_root / skill_name
|
||||
for skill_name, source_dir in skill_sources.items():
|
||||
target_dir = target_root / skill_name
|
||||
if target_dir.exists():
|
||||
self._persist_runtime_edits(
|
||||
config_name=config_name,
|
||||
skill_name=skill_name,
|
||||
active_dir=target_dir,
|
||||
)
|
||||
shutil.rmtree(target_dir)
|
||||
shutil.copytree(source_dir, target_dir)
|
||||
synced_paths.append(target_dir)
|
||||
|
||||
return synced_paths
|
||||
|
||||
def sync_active_skills(
|
||||
self,
|
||||
target_root: Path,
|
||||
skill_names: Iterable[str],
|
||||
) -> List[Path]:
|
||||
"""Sync selected shared skills into one active directory."""
|
||||
skill_sources = {
|
||||
skill_name: self._resolve_source_dir(skill_name)
|
||||
for skill_name in skill_names
|
||||
}
|
||||
return self.sync_skill_dirs(target_root, skill_sources)
|
||||
|
||||
def prepare_active_skills(
|
||||
self,
|
||||
config_name: str,
|
||||
agent_defaults: Dict[str, Iterable[str]],
|
||||
) -> Dict[str, List[Path]]:
|
||||
"""Resolve all agent skills, sync the union once, and map paths per agent."""
|
||||
"""Resolve all agent skills into per-agent installed/active workspaces."""
|
||||
resolved: Dict[str, List[str]] = {}
|
||||
union: List[str] = []
|
||||
|
||||
@@ -198,10 +516,112 @@ class SkillsManager:
|
||||
if skill_name not in union:
|
||||
union.append(skill_name)
|
||||
|
||||
self.sync_active_skills(config_name=config_name, skill_names=union)
|
||||
active_root = self.get_active_root(config_name)
|
||||
# Maintain the legacy union directory for compatibility/debugging.
|
||||
# Agent-local skills remain private to the agent workspace.
|
||||
self.sync_active_skills(
|
||||
target_root=self.get_active_root(config_name),
|
||||
skill_names=[
|
||||
skill_name
|
||||
for skill_name in union
|
||||
if self._is_shared_skill(skill_name)
|
||||
],
|
||||
)
|
||||
|
||||
return {
|
||||
agent_id: [active_root / skill_name for skill_name in skill_names]
|
||||
for agent_id, skill_names in resolved.items()
|
||||
}
|
||||
active_map: Dict[str, List[Path]] = {}
|
||||
for agent_id, skill_names in resolved.items():
|
||||
installed_sources = {
|
||||
skill_name: self._resolve_source_dir(skill_name)
|
||||
for skill_name in skill_names
|
||||
if (self.get_agent_local_root(config_name, agent_id) / skill_name).exists() is False
|
||||
}
|
||||
installed_paths = self.sync_skill_dirs(
|
||||
target_root=self.get_agent_installed_root(config_name, agent_id),
|
||||
skill_sources=installed_sources,
|
||||
)
|
||||
|
||||
local_root = self.get_agent_local_root(config_name, agent_id)
|
||||
local_sources = {
|
||||
skill_name: local_root / skill_name
|
||||
for skill_name in skill_names
|
||||
if (local_root / skill_name).exists()
|
||||
}
|
||||
active_sources = {
|
||||
path.name: path for path in installed_paths
|
||||
}
|
||||
active_sources.update(local_sources)
|
||||
active_map[agent_id] = self.sync_skill_dirs(
|
||||
target_root=self.get_agent_active_root(config_name, agent_id),
|
||||
skill_sources=active_sources,
|
||||
)
|
||||
|
||||
disabled_names = _dedupe_preserve_order(
|
||||
self._resolve_disabled_skill_names(
|
||||
config_name=config_name,
|
||||
agent_id=agent_id,
|
||||
default_skills=agent_defaults.get(agent_id, []),
|
||||
),
|
||||
)
|
||||
disabled_sources = {
|
||||
skill_name: self._resolve_agent_skill_source_dir(
|
||||
config_name=config_name,
|
||||
agent_id=agent_id,
|
||||
skill_name=skill_name,
|
||||
)
|
||||
for skill_name in disabled_names
|
||||
}
|
||||
self.sync_skill_dirs(
|
||||
target_root=self.get_agent_disabled_root(config_name, agent_id),
|
||||
skill_sources=disabled_sources,
|
||||
)
|
||||
|
||||
return active_map
|
||||
|
||||
def _is_shared_skill(self, skill_name: str) -> bool:
    """True when the skill exists in the shared builtin/customized registry."""
    try:
        self._resolve_source_dir(skill_name)
        return True
    except FileNotFoundError:
        return False
|
||||
|
||||
def _resolve_disabled_skill_names(
    self,
    config_name: str,
    agent_id: str,
    default_skills: Iterable[str],
) -> List[str]:
    """Resolve explicit disabled skills for one agent.

    Combines the run manifest's global and per-agent disables with the
    agent.yaml disables, then keeps only names that actually resolve for this
    agent. The result may contain duplicates; callers dedupe.

    ``default_skills`` is retained for interface compatibility: it only fed a
    baseline loop that was dead code — every ``agent_config.disabled_skills``
    entry it could re-append is already added by the extend below (the same
    held for the former agent-local re-append loop, also removed).
    """
    agent_config = load_agent_workspace_config(
        self.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
    )
    manifest = self.load_activation_manifest(config_name)

    disabled = list(manifest.get("global_disabled_skills", []))
    disabled.extend(manifest.get("agent_disabled_skills", {}).get(agent_id, []))
    disabled.extend(agent_config.disabled_skills)

    return [
        skill
        for skill in disabled
        if self._skill_exists_for_agent(config_name, agent_id, skill)
    ]
|
||||
|
||||
|
||||
def _dedupe_preserve_order(items: Iterable[str]) -> List[str]:
|
||||
result: List[str] = []
|
||||
for item in items:
|
||||
if item not in result:
|
||||
result.append(item)
|
||||
return result
|
||||
|
||||
|
||||
def _normalize_skill_name(raw_name: str) -> str:
|
||||
normalized = str(raw_name or "").strip().lower().replace(" ", "_").replace("-", "_")
|
||||
allowed = [ch for ch in normalized if ch.isalnum() or ch == "_"]
|
||||
return "".join(allowed).strip("_")
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
from typing import Any, Dict, Iterable
|
||||
|
||||
from .agent_workspace import load_agent_workspace_config
|
||||
from backend.config.bootstrap_config import get_bootstrap_config_for_run
|
||||
import yaml
|
||||
|
||||
@@ -151,6 +152,9 @@ def create_agent_toolkit(
|
||||
profiles = load_agent_profiles()
|
||||
profile = profiles.get(agent_id, {})
|
||||
skills_manager = SkillsManager()
|
||||
agent_config = load_agent_workspace_config(
|
||||
skills_manager.get_agent_asset_dir(config_name, agent_id) / "agent.yaml",
|
||||
)
|
||||
bootstrap_config = get_bootstrap_config_for_run(
|
||||
skills_manager.project_root,
|
||||
config_name,
|
||||
@@ -158,8 +162,16 @@ def create_agent_toolkit(
|
||||
override = bootstrap_config.agent_override(agent_id)
|
||||
active_groups = override.get(
|
||||
"active_tool_groups",
|
||||
profile.get("active_tool_groups", []),
|
||||
agent_config.active_tool_groups
|
||||
or profile.get("active_tool_groups", []),
|
||||
)
|
||||
disabled_groups = set(agent_config.disabled_tool_groups)
|
||||
if disabled_groups:
|
||||
active_groups = [
|
||||
group_name
|
||||
for group_name in active_groups
|
||||
if group_name not in disabled_groups
|
||||
]
|
||||
|
||||
toolkit = Toolkit(
|
||||
agent_skill_instruction=(
|
||||
@@ -184,7 +196,7 @@ def create_agent_toolkit(
|
||||
default_skills=profile.get("skills", []),
|
||||
)
|
||||
active_skill_dirs = [
|
||||
skills_manager.get_active_root(config_name) / skill_name
|
||||
skills_manager.get_agent_active_root(config_name, agent_id) / skill_name
|
||||
for skill_name in skill_names
|
||||
]
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
from .skills_manager import SkillsManager
|
||||
|
||||
|
||||
@@ -59,6 +61,10 @@ class WorkspaceManager:
|
||||
agent_id,
|
||||
)
|
||||
asset_dir.mkdir(parents=True, exist_ok=True)
|
||||
(asset_dir / "skills" / "installed").mkdir(parents=True, exist_ok=True)
|
||||
(asset_dir / "skills" / "active").mkdir(parents=True, exist_ok=True)
|
||||
(asset_dir / "skills" / "disabled").mkdir(parents=True, exist_ok=True)
|
||||
(asset_dir / "skills" / "local").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
self._ensure_file(
|
||||
asset_dir / "ROLE.md",
|
||||
@@ -81,6 +87,35 @@ class WorkspaceManager:
|
||||
f"{policy_seed}".strip()
|
||||
+ "\n",
|
||||
)
|
||||
self._ensure_file(
|
||||
asset_dir / "SOUL.md",
|
||||
"# Soul\n\n"
|
||||
"Describe the agent's temperament, reasoning posture, and voice.\n\n",
|
||||
)
|
||||
self._ensure_file(
|
||||
asset_dir / "PROFILE.md",
|
||||
"# Profile\n\n"
|
||||
"Track this agent's long-lived investment style, preferences, and strengths.\n\n",
|
||||
)
|
||||
self._ensure_file(
|
||||
asset_dir / "AGENTS.md",
|
||||
"# Agent Guide\n\n"
|
||||
"Document how this agent should work, collaborate, and choose tools or skills.\n\n",
|
||||
)
|
||||
self._ensure_file(
|
||||
asset_dir / "MEMORY.md",
|
||||
"# Memory\n\n"
|
||||
"Store durable lessons, heuristics, and reminders for this agent.\n\n",
|
||||
)
|
||||
self._ensure_file(
|
||||
asset_dir / "HEARTBEAT.md",
|
||||
"# Heartbeat\n\n"
|
||||
"Optional checklist for periodic review or self-reflection.\n\n",
|
||||
)
|
||||
self._ensure_agent_yaml(
|
||||
asset_dir / "agent.yaml",
|
||||
agent_id=agent_id,
|
||||
)
|
||||
return asset_dir
|
||||
|
||||
def initialize_default_assets(
|
||||
@@ -138,3 +173,27 @@ class WorkspaceManager:
|
||||
def _ensure_file(path: Path, content: str) -> None:
    """Create *path* with *content* only if it does not already exist.

    Idempotent seeding helper: existing files are never overwritten, so
    user edits to workspace assets survive re-initialization.
    """
    if not path.exists():
        path.write_text(content, encoding="utf-8")
|
||||
|
||||
@staticmethod
def _ensure_agent_yaml(path: Path, agent_id: str) -> None:
    """Seed a default agent.yaml at *path* unless one already exists.

    Never overwrites an existing file, so user-edited configs are preserved.
    """
    if path.exists():
        return

    default_prompt_files = [
        "SOUL.md",
        "PROFILE.md",
        "AGENTS.md",
        "POLICY.md",
        "MEMORY.md",
    ]
    payload = {
        "agent_id": agent_id,
        "prompt_files": default_prompt_files,
        "enabled_skills": [],
        "disabled_skills": [],
        "active_tool_groups": [],
        "disabled_tool_groups": [],
    }
    # sort_keys=False keeps the human-friendly field order shown above.
    serialized = yaml.safe_dump(payload, allow_unicode=True, sort_keys=False)
    path.write_text(serialized, encoding="utf-8")
|
||||
|
||||
192
backend/cli.py
192
backend/cli.py
@@ -24,7 +24,9 @@ from rich.prompt import Confirm
|
||||
from rich.table import Table
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from backend.agents.agent_workspace import load_agent_workspace_config
|
||||
from backend.agents.prompt_loader import PromptLoader
|
||||
from backend.agents.skills_manager import SkillsManager
|
||||
from backend.agents.workspace_manager import WorkspaceManager
|
||||
from backend.data.market_ingest import ingest_symbols
|
||||
from backend.data.market_store import MarketStore
|
||||
@@ -38,12 +40,21 @@ app = typer.Typer(
|
||||
)
|
||||
ingest_app = typer.Typer(help="Ingest Polygon market data into the research warehouse.")
|
||||
app.add_typer(ingest_app, name="ingest")
|
||||
skills_app = typer.Typer(help="Inspect and manage per-agent skills.")
|
||||
app.add_typer(skills_app, name="skills")
|
||||
|
||||
console = Console()
|
||||
_prompt_loader = PromptLoader()
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def _normalize_typer_value(value, default):
|
||||
"""Allow CLI command functions to be called directly in tests/internal code."""
|
||||
if hasattr(value, "default"):
|
||||
return value.default
|
||||
return default if value is None else value
|
||||
|
||||
|
||||
def get_project_root() -> Path:
|
||||
"""Get the project root directory."""
|
||||
# Assuming cli.py is in backend/
|
||||
@@ -213,6 +224,19 @@ def initialize_workspace(config_name: str) -> Path:
|
||||
return workspace_manager.get_run_dir(config_name)
|
||||
|
||||
|
||||
def _require_agent_asset_dir(config_name: str, agent_id: str) -> Path:
    """Return the agent's asset directory, creating default assets first.

    Ensures the per-agent workspace (ROLE.md, agent.yaml, skill dirs, ...)
    exists before any skills command reads or writes it, so CLI commands can
    be run against a fresh run config without a separate init step.
    """
    manager = WorkspaceManager(project_root=get_project_root())
    # Seeding is idempotent: existing files are left untouched.
    manager.initialize_default_assets(
        config_name=config_name,
        agent_ids=[agent_id],
        analyst_personas=_prompt_loader.load_yaml_config(
            "analyst",
            "personas",
        ),
    )
    return manager.skills_manager.get_agent_asset_dir(config_name, agent_id)
|
||||
|
||||
|
||||
def _resolve_symbols(raw_tickers: Optional[str], config_name: Optional[str] = None) -> list[str]:
|
||||
"""Resolve symbols from explicit input or runtime bootstrap config."""
|
||||
if raw_tickers and raw_tickers.strip():
|
||||
@@ -622,6 +646,137 @@ def ingest_report(
|
||||
console.print(table)
|
||||
|
||||
|
||||
@skills_app.command("list")
def skills_list(
    config_name: str = typer.Option(
        "default",
        "--config-name",
        "-c",
        help="Run config name.",
    ),
    agent_id: Optional[str] = typer.Option(
        None,
        "--agent-id",
        "-a",
        help="Optional agent id to show resolved status for.",
    ),
) -> None:
    """List available skills and optional agent-level enablement state.

    Without --agent-id, prints the global skill catalog. With --agent-id, an
    extra Status column shows, per skill: "disabled" (explicitly disabled in
    agent.yaml), "enabled" (explicitly enabled), "active" (resolved on for
    this agent without an explicit override), or "-" (inactive).
    """
    project_root = get_project_root()
    skills_manager = SkillsManager(project_root=project_root)
    # Agent-scoped catalogs may include agent-local skills in addition to the
    # shared catalog.
    catalog = (
        skills_manager.list_agent_skill_catalog(config_name, agent_id)
        if agent_id
        else skills_manager.list_skill_catalog()
    )
    if not catalog:
        console.print("[yellow]No skills found[/yellow]")
        raise typer.Exit(0)

    agent_config = None
    resolved_skills = set()
    if agent_id:
        # Seeds the agent workspace if missing, then reads its agent.yaml.
        asset_dir = _require_agent_asset_dir(config_name, agent_id)
        agent_config = load_agent_workspace_config(asset_dir / "agent.yaml")
        resolved_skills = set(
            skills_manager.resolve_agent_skill_names(
                config_name=config_name,
                agent_id=agent_id,
                default_skills=[],
            ),
        )

    table = Table(title="Skill Catalog")
    table.add_column("Skill", style="cyan")
    table.add_column("Source")
    table.add_column("Description")
    if agent_id:
        table.add_column("Status")

    enabled = set(agent_config.enabled_skills) if agent_config else set()
    disabled = set(agent_config.disabled_skills) if agent_config else set()
    for skill in catalog:
        row = [
            skill.skill_name,
            skill.source,
            skill.description or "-",
        ]
        if agent_id:
            # Explicit disable wins over explicit enable, which wins over the
            # implicitly-resolved active set.
            if skill.skill_name in disabled:
                status = "disabled"
            elif skill.skill_name in enabled:
                status = "enabled"
            elif skill.skill_name in resolved_skills:
                status = "active"
            else:
                status = "-"
            row.append(status)
        table.add_row(*row)
    console.print(table)
|
||||
|
||||
|
||||
@skills_app.command("enable")
def skills_enable(
    agent_id: str = typer.Option(..., "--agent-id", "-a", help="Agent id."),
    skill: str = typer.Option(..., "--skill", "-s", help="Skill name."),
    config_name: str = typer.Option(
        "default",
        "--config-name",
        "-c",
        help="Run config name.",
    ),
) -> None:
    """Enable a skill for one agent in agent.yaml.

    Validates the skill against the agent's catalog first; unknown names
    exit with status 1 instead of writing a dangling entry.
    """
    # Seeds the agent workspace if missing so agent.yaml exists to update.
    asset_dir = _require_agent_asset_dir(config_name, agent_id)
    skills_manager = SkillsManager(project_root=get_project_root())
    catalog = {
        item.skill_name
        for item in skills_manager.list_agent_skill_catalog(config_name, agent_id)
    }
    if skill not in catalog:
        console.print(f"[red]Unknown skill: {skill}[/red]")
        raise typer.Exit(1)

    result = skills_manager.update_agent_skill_overrides(
        config_name=config_name,
        agent_id=agent_id,
        enable=[skill],
    )
    console.print(
        f"[green]Enabled[/green] `{skill}` for `{agent_id}` "
        f"([{asset_dir / 'agent.yaml'}])",
    )
    # Echo the full override state so the user sees the net effect.
    console.print(f"Enabled skills: {', '.join(result['enabled_skills']) or '-'}")
    console.print(f"Disabled skills: {', '.join(result['disabled_skills']) or '-'}")
|
||||
|
||||
|
||||
@skills_app.command("disable")
def skills_disable(
    agent_id: str = typer.Option(..., "--agent-id", "-a", help="Agent id."),
    skill: str = typer.Option(..., "--skill", "-s", help="Skill name."),
    config_name: str = typer.Option(
        "default",
        "--config-name",
        "-c",
        help="Run config name.",
    ),
) -> None:
    """Disable a skill for one agent in agent.yaml.

    NOTE(review): unlike `skills enable`, this does not validate the name
    against the catalog — presumably so stale entries can still be disabled;
    confirm that asymmetry is intentional.
    """
    # Seeds the agent workspace if missing so agent.yaml exists to update.
    asset_dir = _require_agent_asset_dir(config_name, agent_id)
    skills_manager = SkillsManager(project_root=get_project_root())
    result = skills_manager.update_agent_skill_overrides(
        config_name=config_name,
        agent_id=agent_id,
        disable=[skill],
    )
    console.print(
        f"[yellow]Disabled[/yellow] `{skill}` for `{agent_id}` "
        f"([{asset_dir / 'agent.yaml'}])",
    )
    # Echo the full override state so the user sees the net effect.
    console.print(f"Enabled skills: {', '.join(result['enabled_skills']) or '-'}")
    console.print(f"Disabled skills: {', '.join(result['disabled_skills']) or '-'}")
||||
|
||||
|
||||
@app.command()
|
||||
def backtest(
|
||||
start: Optional[str] = typer.Option(
|
||||
@@ -684,6 +839,7 @@ def backtest(
|
||||
border_style="cyan",
|
||||
),
|
||||
)
|
||||
poll_interval = int(_normalize_typer_value(poll_interval, 10))
|
||||
|
||||
# Validate dates - required for backtest
|
||||
if not start or not end:
|
||||
@@ -801,12 +957,22 @@ def live(
|
||||
"-p",
|
||||
help="WebSocket server port",
|
||||
),
|
||||
schedule_mode: str = typer.Option(
|
||||
"daily",
|
||||
"--schedule-mode",
|
||||
help="Scheduler mode: 'daily' or 'intraday'",
|
||||
),
|
||||
trigger_time: str = typer.Option(
|
||||
"now",
|
||||
"--trigger-time",
|
||||
"-t",
|
||||
help="Trigger time in LOCAL timezone (HH:MM), or 'now' to run immediately",
|
||||
),
|
||||
interval_minutes: int = typer.Option(
|
||||
60,
|
||||
"--interval-minutes",
|
||||
help="When schedule-mode=intraday, run every N minutes",
|
||||
),
|
||||
poll_interval: int = typer.Option(
|
||||
10,
|
||||
"--poll-interval",
|
||||
@@ -830,9 +996,12 @@ def live(
|
||||
evotraders live # Run immediately (default)
|
||||
evotraders live --mock # Mock mode
|
||||
evotraders live -t 22:30 # Run at 22:30 local time daily
|
||||
evotraders live --schedule-mode intraday --interval-minutes 60
|
||||
evotraders live --trigger-time now # Run immediately
|
||||
evotraders live --clean # Clear historical data before starting
|
||||
"""
|
||||
schedule_mode = str(_normalize_typer_value(schedule_mode, "daily"))
|
||||
interval_minutes = int(_normalize_typer_value(interval_minutes, 60))
|
||||
mode_name = "MOCK" if mock else "LIVE"
|
||||
console.print(
|
||||
Panel.fit(
|
||||
@@ -864,6 +1033,16 @@ def live(
|
||||
# Handle historical data cleanup
|
||||
handle_history_cleanup(config_name, auto_clean=clean)
|
||||
|
||||
if schedule_mode not in {"daily", "intraday"}:
|
||||
console.print(
|
||||
f"[red]Error: unsupported schedule mode '{schedule_mode}'[/red]",
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
|
||||
if interval_minutes <= 0:
|
||||
console.print("[red]Error: --interval-minutes must be > 0[/red]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Convert local time to NYSE time
|
||||
nyse_tz = ZoneInfo("America/New_York")
|
||||
local_tz = datetime.now().astimezone().tzinfo
|
||||
@@ -871,7 +1050,9 @@ def live(
|
||||
nyse_now = datetime.now(nyse_tz)
|
||||
|
||||
# Convert trigger time from local to NYSE
|
||||
if trigger_time.lower() == "now":
|
||||
if schedule_mode == "intraday":
|
||||
nyse_trigger_time = "now"
|
||||
elif trigger_time.lower() == "now":
|
||||
nyse_trigger_time = "now"
|
||||
else:
|
||||
local_trigger = datetime.strptime(trigger_time, "%H:%M")
|
||||
@@ -891,7 +1072,10 @@ def live(
|
||||
console.print(
|
||||
f" NYSE Time: {nyse_now.strftime('%Y-%m-%d %H:%M:%S %Z')}",
|
||||
)
|
||||
if nyse_trigger_time == "now":
|
||||
console.print(f" Schedule: {schedule_mode}")
|
||||
if schedule_mode == "intraday":
|
||||
console.print(f" Interval: every {interval_minutes} minute(s)")
|
||||
elif nyse_trigger_time == "now":
|
||||
console.print(" Trigger: [green]NOW (immediate)[/green]")
|
||||
else:
|
||||
console.print(
|
||||
@@ -951,10 +1135,14 @@ def live(
|
||||
host,
|
||||
"--port",
|
||||
str(port),
|
||||
"--schedule-mode",
|
||||
schedule_mode,
|
||||
"--poll-interval",
|
||||
str(poll_interval),
|
||||
"--trigger-time",
|
||||
nyse_trigger_time,
|
||||
"--interval-minutes",
|
||||
str(interval_minutes),
|
||||
]
|
||||
|
||||
if mock:
|
||||
|
||||
@@ -48,6 +48,9 @@ def _resolve_runtime_config(args) -> dict:
|
||||
project_root=project_root,
|
||||
config_name=args.config_name,
|
||||
enable_memory=args.enable_memory,
|
||||
schedule_mode=args.schedule_mode,
|
||||
interval_minutes=args.interval_minutes,
|
||||
trigger_time=args.trigger_time,
|
||||
)
|
||||
|
||||
|
||||
@@ -261,6 +264,7 @@ async def run_with_gateway(args):
|
||||
# Create scheduler callback
|
||||
scheduler_callback = None
|
||||
trading_dates = []
|
||||
live_scheduler = None
|
||||
|
||||
if is_backtest:
|
||||
backtest_scheduler = BacktestScheduler(
|
||||
@@ -276,10 +280,11 @@ async def run_with_gateway(args):
|
||||
|
||||
scheduler_callback = scheduler_callback_fn
|
||||
else:
|
||||
# Live mode: use daily scheduler with NYSE timezone
|
||||
# Live mode: use daily or intraday scheduler with NYSE timezone
|
||||
live_scheduler = Scheduler(
|
||||
mode="daily",
|
||||
trigger_time=args.trigger_time,
|
||||
mode=runtime_config["schedule_mode"],
|
||||
trigger_time=runtime_config["trigger_time"],
|
||||
interval_minutes=runtime_config["interval_minutes"],
|
||||
config={"config_name": config_name},
|
||||
)
|
||||
|
||||
@@ -300,11 +305,15 @@ async def run_with_gateway(args):
|
||||
"backtest_mode": is_backtest,
|
||||
"tickers": tickers,
|
||||
"config_name": config_name,
|
||||
"schedule_mode": runtime_config["schedule_mode"],
|
||||
"interval_minutes": runtime_config["interval_minutes"],
|
||||
"trigger_time": runtime_config["trigger_time"],
|
||||
"initial_cash": initial_cash,
|
||||
"margin_requirement": margin_requirement,
|
||||
"max_comm_cycles": runtime_config["max_comm_cycles"],
|
||||
"enable_memory": runtime_config["enable_memory"],
|
||||
},
|
||||
scheduler=live_scheduler if not is_backtest else None,
|
||||
)
|
||||
|
||||
if is_backtest:
|
||||
@@ -325,7 +334,13 @@ def main():
|
||||
parser.add_argument("--config-name", default="mock")
|
||||
parser.add_argument("--host", default="0.0.0.0")
|
||||
parser.add_argument("--port", type=int, default=8765)
|
||||
parser.add_argument(
|
||||
"--schedule-mode",
|
||||
choices=["daily", "intraday"],
|
||||
default="daily",
|
||||
)
|
||||
parser.add_argument("--trigger-time", default="09:30") # NYSE market open
|
||||
parser.add_argument("--interval-minutes", type=int, default=60)
|
||||
parser.add_argument("--poll-interval", type=int, default=10)
|
||||
parser.add_argument("--start-date")
|
||||
parser.add_argument("--end-date")
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
1
backend/skills/__init__.py
Normal file
1
backend/skills/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
1
backend/skills/builtin/__init__.py
Normal file
1
backend/skills/builtin/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
@@ -1,21 +1,22 @@
|
||||
---
|
||||
name: fundamental_review
|
||||
description: Review a company from a fundamentals-first perspective before issuing a trading signal.
|
||||
name: 基本面分析
|
||||
description: 当用户要求“基本面分析”“看财务质量”“分析盈利能力”“判断公司质量”或“评估长期盈利韧性”时,应使用此技能。
|
||||
version: 1.0.0
|
||||
---
|
||||
|
||||
# Fundamental Review
|
||||
# 基本面分析
|
||||
|
||||
Use this skill when the task requires judging business quality, balance-sheet strength, profitability, or long-term earnings durability.
|
||||
当用户希望从公司质量、资产负债表强度、盈利能力或长期盈利韧性出发判断标的时,使用这个技能。
|
||||
|
||||
## Workflow
|
||||
## 工作流程
|
||||
|
||||
1. Check profitability, growth, financial health, and efficiency before forming a conclusion.
|
||||
2. Separate durable business quality from short-term noise.
|
||||
3. State what would invalidate the thesis.
|
||||
4. End with a clear signal, confidence, and the main drivers behind that signal.
|
||||
1. 在形成结论前,先检查盈利能力、成长性、财务健康度和经营效率。
|
||||
2. 区分可持续的业务质量和短期噪音。
|
||||
3. 明确指出会推翻当前判断的条件。
|
||||
4. 最终给出清晰的信号、置信度和主要驱动因素。
|
||||
|
||||
## Guardrails
|
||||
## 约束
|
||||
|
||||
- Do not rely on one metric in isolation.
|
||||
- Call out missing data explicitly.
|
||||
- Prefer conservative conclusions when financial quality is mixed.
|
||||
- 不要孤立依赖单一指标。
|
||||
- 缺失数据要明确指出。
|
||||
- 当财务质量优劣混杂时,优先给出保守结论。
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
---
|
||||
name: portfolio_decisioning
|
||||
description: Synthesize analyst inputs and risk feedback into explicit portfolio decisions.
|
||||
name: 组合决策
|
||||
description: 当用户要求“组合决策”“给出最终仓位”“整合分析结论”“输出交易决策”或“形成组合操作方案”时,应使用此技能。
|
||||
version: 1.0.0
|
||||
---
|
||||
|
||||
# Portfolio Decisioning
|
||||
# 组合决策
|
||||
|
||||
Use this skill when you are responsible for converting team analysis into final trades.
|
||||
当用户需要把团队分析转化为最终交易决策时,使用这个技能。
|
||||
|
||||
## Workflow
|
||||
## 工作流程
|
||||
|
||||
1. Read analyst conclusions and risk warnings before acting.
|
||||
2. Evaluate the current portfolio, cash, and margin constraints.
|
||||
3. Record one explicit decision per ticker using the decision tool.
|
||||
4. Summarize the portfolio-level rationale after all decisions are recorded.
|
||||
1. 行动前先阅读分析师结论和风险警示。
|
||||
2. 评估当前组合、现金和保证金约束。
|
||||
3. 使用决策工具为每个 ticker 记录一个明确决策。
|
||||
4. 在全部决策记录完成后,总结组合层面的整体理由。
|
||||
|
||||
## Guardrails
|
||||
## 约束
|
||||
|
||||
- Position sizing must respect capital and margin limits.
|
||||
- Prefer smaller size when analyst conviction and risk signals disagree.
|
||||
- Do not leave a ticker undecided when the task expects a full slate of decisions.
|
||||
- 仓位大小必须遵守资金和保证金限制。
|
||||
- 当分析师信心与风险信号不一致时,优先采用更小仓位。
|
||||
- 当任务要求完整决策清单时,不要让任何 ticker 处于未决状态。
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
---
|
||||
name: risk_review
|
||||
description: Assess portfolio and market risks before final position sizing and execution.
|
||||
name: 风险审查
|
||||
description: 当用户要求“风险审查”“看组合风险”“检查集中度”“评估波动风险”或“确认仓位风险边界”时,应使用此技能。
|
||||
version: 1.0.0
|
||||
---
|
||||
|
||||
# Risk Review
|
||||
# 风险审查
|
||||
|
||||
Use this skill when you must identify concentration, volatility, leverage, and scenario risks.
|
||||
当用户需要识别集中度、波动率、杠杆和情景风险时,使用这个技能。
|
||||
|
||||
## Workflow
|
||||
## 工作流程
|
||||
|
||||
1. Review the proposed exposure by ticker and theme.
|
||||
2. Identify concentration, volatility, liquidity, and leverage concerns.
|
||||
3. Rank warnings by severity.
|
||||
4. Translate risk findings into concrete limits or cautions for the portfolio manager.
|
||||
1. 按 ticker 和主题检查拟议敞口。
|
||||
2. 识别集中度、波动率、流动性和杠杆方面的风险点。
|
||||
3. 按严重程度排序风险警示。
|
||||
4. 将风险结论转化为给投资经理的具体限制或注意事项。
|
||||
|
||||
## Guardrails
|
||||
## 约束
|
||||
|
||||
- Focus on actionable risk controls.
|
||||
- Quantify limits when the available data supports it.
|
||||
- Distinguish fatal blockers from manageable risks.
|
||||
- 聚焦可执行的风险控制措施。
|
||||
- 当数据支持时尽量量化限制。
|
||||
- 明确区分致命阻断项和可管理风险。
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
---
|
||||
name: sentiment_review
|
||||
description: Analyze news flow, market psychology, and insider behavior for catalyst-driven signals.
|
||||
name: 情绪分析
|
||||
description: 当用户要求“情绪分析”“看新闻情绪”“分析市场心理”“判断事件驱动信号”或“检查内幕行为”时,应使用此技能。
|
||||
version: 1.0.0
|
||||
---
|
||||
|
||||
# Sentiment Review
|
||||
# 情绪分析
|
||||
|
||||
Use this skill when the task depends on recent catalysts, news tone, or behavioral market signals.
|
||||
当用户需要基于近期催化剂、新闻语气或行为层面的市场信号做判断时,使用这个技能。
|
||||
|
||||
## Workflow
|
||||
## 工作流程
|
||||
|
||||
1. Review recent news and identify the dominant narrative.
|
||||
2. Check insider activity for confirming or conflicting signals.
|
||||
3. Separate durable sentiment shifts from transient noise.
|
||||
4. Explain how sentiment changes the near-term trade outlook.
|
||||
1. 回顾近期新闻并识别主导叙事。
|
||||
2. 检查内幕活动,寻找确认或冲突信号。
|
||||
3. 区分可持续的情绪变化和短暂噪音。
|
||||
4. 说明情绪如何改变短期交易展望。
|
||||
|
||||
## Guardrails
|
||||
## 约束
|
||||
|
||||
- Do not confuse attention with conviction.
|
||||
- Highlight when sentiment is strong but unsupported by fundamentals.
|
||||
- Be explicit about catalyst timing risk.
|
||||
- 不要把注意力误判为真实信念。
|
||||
- 当情绪很强但缺乏基本面支持时,要明确指出。
|
||||
- 对催化剂时间窗口风险要说清楚。
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
---
|
||||
name: technical_review
|
||||
description: Evaluate price action, momentum, and volatility to judge timing and market regime.
|
||||
name: 技术分析
|
||||
description: 当用户要求“技术分析”“看走势”“判断入场时机”“分析动量”“评估波动率”或“判断市场状态”时,应使用此技能。
|
||||
version: 1.0.0
|
||||
---
|
||||
|
||||
# Technical Review
|
||||
# 技术分析
|
||||
|
||||
Use this skill when the task is sensitive to entry timing, trend quality, or short-term market structure.
|
||||
当用户需要从入场时机、趋势质量或短期市场结构出发判断标的时,使用这个技能。
|
||||
|
||||
## Workflow
|
||||
## 工作流程
|
||||
|
||||
1. Assess trend direction and strength.
|
||||
2. Check momentum and mean-reversion conditions.
|
||||
3. Review volatility before making aggressive recommendations.
|
||||
4. Convert the setup into a trading view with explicit risk awareness.
|
||||
1. 评估趋势方向和强度。
|
||||
2. 检查动量与均值回归条件。
|
||||
3. 在给出激进建议前先审视波动率。
|
||||
4. 将当前形态转化为带有明确风险意识的交易观点。
|
||||
|
||||
## Guardrails
|
||||
## 约束
|
||||
|
||||
- Distinguish trend continuation from overshoot.
|
||||
- Avoid strong conviction when signals conflict.
|
||||
- Treat volatility as a sizing input, not only a directional input.
|
||||
- 区分趋势延续和过度透支。
|
||||
- 当信号冲突时避免给出高确定性判断。
|
||||
- 将波动率视为仓位输入,而不仅仅是方向输入。
|
||||
|
||||
@@ -1,21 +1,31 @@
|
||||
---
|
||||
name: valuation_review
|
||||
description: Estimate fair value and margin of safety using multiple valuation lenses.
|
||||
name: 估值分析
|
||||
description: 当用户要求“估值分析”“看合理价值”“判断高估低估”“测算安全边际”或“比较多种估值方法”时,应使用此技能。
|
||||
version: 1.0.0
|
||||
---
|
||||
|
||||
# Valuation Review
|
||||
# 估值分析
|
||||
|
||||
Use this skill when the task requires determining whether a stock is cheap, expensive, or fairly priced.
|
||||
当用户需要判断一只股票是低估、高估还是定价合理时,使用这个技能。
|
||||
|
||||
## Workflow
|
||||
## 工作流程
|
||||
|
||||
1. Use more than one valuation method when possible.
|
||||
2. Compare intrinsic value estimates with current market pricing.
|
||||
3. Explain the key assumptions behind the valuation view.
|
||||
4. State the margin of safety and what could compress or expand it.
|
||||
1. 条件允许时,使用不止一种估值方法。
|
||||
2. 对比内在价值估计与当前市场价格。
|
||||
3. 解释估值判断背后的关键假设。
|
||||
4. 明确安全边际,以及哪些因素会压缩或扩大它。
|
||||
|
||||
## Guardrails
|
||||
## 可复用资源
|
||||
|
||||
- Treat valuation as a range, not a single precise number.
|
||||
- Call out assumption sensitivity.
|
||||
- Avoid high-confidence calls when inputs are sparse or unstable.
|
||||
- `scripts/dcf_report.py`
|
||||
用于贴现现金流估值的确定性计算和报告生成。
|
||||
- `scripts/owner_earnings_report.py`
|
||||
用于 owner earnings 估值的确定性计算和报告生成。
|
||||
- `scripts/multiple_valuation_report.py`
|
||||
用于 EV/EBITDA 和 Residual Income 两类估值报告生成。
|
||||
|
||||
## 约束
|
||||
|
||||
- 将估值视为区间,而不是一个精确点值。
|
||||
- 明确说明假设敏感性。
|
||||
- 当输入稀疏或不稳定时,避免给出高置信度判断。
|
||||
|
||||
1
backend/skills/builtin/valuation_review/__init__.py
Normal file
1
backend/skills/builtin/valuation_review/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
@@ -0,0 +1 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
@@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Deterministic DCF report helpers for the valuation_review skill."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
def build_dcf_report(rows: Iterable[dict], current_date: str) -> str:
    """Render a DCF valuation report from normalized row inputs.

    Args:
        rows: Mappings with ``ticker`` plus either an ``error`` message or
            the numeric fields ``current_fcf``, ``growth_rate`` and
            ``market_cap`` (optional: ``discount_rate``, ``terminal_growth``,
            ``num_years``).
        current_date: Date label embedded in the report header.

    Returns:
        A multi-line plain-text report with one section per ticker.
    """
    lines = [f"=== DCF Valuation Analysis ({current_date}) ===\n"]

    for row in rows:
        error = row.get("error")
        ticker = row["ticker"]
        if error:
            # Upstream data fetch failed; surface the message and move on.
            lines.append(f"{ticker}: {error}\n")
            continue

        current_fcf = float(row["current_fcf"])
        growth_rate = float(row["growth_rate"])
        market_cap = float(row["market_cap"])
        discount_rate = float(row.get("discount_rate", 0.10))
        terminal_growth = float(row.get("terminal_growth", 0.03))
        num_years = int(row.get("num_years", 5))

        # Present value of the explicit-forecast FCF stream.
        pv_fcf = sum(
            current_fcf
            * (1 + growth_rate) ** year
            / (1 + discount_rate) ** year
            for year in range(1, num_years + 1)
        )
        # Gordon-growth terminal value, discounted back to today.
        # NOTE(review): assumes discount_rate > terminal_growth; equal inputs
        # would divide by zero — confirm upstream validation.
        terminal_fcf = (
            current_fcf
            * (1 + growth_rate) ** num_years
            * (1 + terminal_growth)
        )
        terminal_value = terminal_fcf / (discount_rate - terminal_growth)
        pv_terminal = terminal_value / (1 + discount_rate) ** num_years
        enterprise_value = pv_fcf + pv_terminal
        # Bug fix: guard a zero/negative market cap instead of raising
        # ZeroDivisionError, consistent with build_ev_ebitda_report.
        value_gap = (
            (enterprise_value - market_cap) / market_cap * 100
            if market_cap > 0
            else 0.0
        )

        if value_gap > 20:
            assessment = "SIGNIFICANTLY UNDERVALUED"
        elif value_gap > 0:
            assessment = "POTENTIALLY UNDERVALUED"
        elif value_gap > -20:
            assessment = "POTENTIALLY OVERVALUED"
        else:
            assessment = "SIGNIFICANTLY OVERVALUED"

        lines.append(f"{ticker}:")
        lines.append(f" Current FCF: ${current_fcf:,.0f}")
        lines.append(f" DCF Enterprise Value: ${enterprise_value:,.0f}")
        lines.append(f" Market Cap: ${market_cap:,.0f}")
        lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}")
        lines.append("")

    return "\n".join(lines)
|
||||
|
||||
|
||||
def main() -> None:
    """Read normalized rows from stdin (JSON) and emit a text report.

    Expects a payload of the form ``{"rows": [...], "current_date": "..."}``.
    """
    # Fix: use a plain import instead of the opaque __import__("sys") hack.
    import sys

    payload = json.load(sys.stdin)
    print(build_dcf_report(payload["rows"], payload["current_date"]))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,115 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Deterministic multiple-based valuation helpers for the valuation_review skill."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
def build_ev_ebitda_report(rows: Iterable[dict], current_date: str) -> str:
    """Render an EV/EBITDA valuation report from normalized row inputs.

    Args:
        rows: Mappings with ``ticker`` plus either an ``error`` message or the
            numeric fields ``current_multiple``, ``median_multiple``,
            ``current_ebitda``, ``market_cap`` and ``net_debt``.
        current_date: Date label embedded in the report header.

    Returns:
        A multi-line plain-text report with one section per ticker.
    """
    lines = [f"=== EV/EBITDA Valuation ({current_date}) ===\n"]

    for row in rows:
        error = row.get("error")
        ticker = row["ticker"]
        if error:
            # Upstream data fetch failed; surface the message and move on.
            lines.append(f"{ticker}: {error}\n")
            continue

        current_multiple = float(row["current_multiple"])
        median_multiple = float(row["median_multiple"])
        current_ebitda = float(row["current_ebitda"])
        market_cap = float(row["market_cap"])
        net_debt = float(row["net_debt"])

        # Re-rate today's EBITDA at the historical median multiple; equity
        # value is floored at zero (debt can exceed implied EV).
        implied_ev = median_multiple * current_ebitda
        implied_equity = max(implied_ev - net_debt, 0.0)
        value_gap = (
            (implied_equity - market_cap) / market_cap * 100
            if market_cap > 0
            else 0.0
        )
        # Bug fix: guard median_multiple == 0 (previously ZeroDivisionError),
        # mirroring the market_cap guard above.
        multiple_discount = (
            (median_multiple - current_multiple) / median_multiple * 100
            if median_multiple != 0
            else 0.0
        )

        if multiple_discount > 10:
            assessment = "TRADING BELOW HISTORICAL MULTIPLE"
        elif multiple_discount > -10:
            assessment = "NEAR HISTORICAL AVERAGE"
        else:
            assessment = "TRADING ABOVE HISTORICAL MULTIPLE"

        lines.append(f"{ticker}:")
        lines.append(f" Current EV/EBITDA: {current_multiple:.1f}x")
        lines.append(f" Historical Median: {median_multiple:.1f}x")
        lines.append(f" Multiple vs History: {multiple_discount:+.1f}%")
        lines.append(f" Implied Equity Value: ${implied_equity:,.0f}")
        lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}")
        lines.append("")

    return "\n".join(lines)
|
||||
|
||||
|
||||
def build_residual_income_report(rows: Iterable[dict], current_date: str) -> str:
    """Render a residual income valuation report from normalized row inputs.

    Args:
        rows: Mappings with ``ticker`` plus either an ``error`` message or
            the numeric fields ``book_value``, ``initial_ri`` and
            ``market_cap`` (optional: ``cost_of_equity``, ``bv_growth``,
            ``terminal_growth``, ``num_years``, ``margin_of_safety``).
        current_date: Date label embedded in the report header.

    Returns:
        A multi-line plain-text report with one section per ticker.
    """
    lines = [f"=== Residual Income Valuation ({current_date}) ===\n"]

    for row in rows:
        error = row.get("error")
        ticker = row["ticker"]
        if error:
            # Upstream data fetch failed; surface the message and move on.
            lines.append(f"{ticker}: {error}\n")
            continue

        book_value = float(row["book_value"])
        initial_ri = float(row["initial_ri"])
        market_cap = float(row["market_cap"])
        cost_of_equity = float(row.get("cost_of_equity", 0.10))
        bv_growth = float(row.get("bv_growth", 0.03))
        terminal_growth = float(row.get("terminal_growth", 0.03))
        num_years = int(row.get("num_years", 5))
        margin_of_safety = float(row.get("margin_of_safety", 0.20))

        # Present value of residual income over the explicit forecast window.
        pv_ri = sum(
            initial_ri * (1 + bv_growth) ** year / (1 + cost_of_equity) ** year
            for year in range(1, num_years + 1)
        )
        # NOTE(review): assumes cost_of_equity > terminal_growth; equal inputs
        # would divide by zero — confirm upstream validation.
        terminal_ri = initial_ri * (1 + bv_growth) ** (num_years + 1)
        terminal_value = terminal_ri / (cost_of_equity - terminal_growth)
        pv_terminal = terminal_value / (1 + cost_of_equity) ** num_years
        intrinsic_value = (book_value + pv_ri + pv_terminal) * (
            1 - margin_of_safety
        )
        # Bug fix: guard a zero/negative market cap instead of raising
        # ZeroDivisionError, consistent with build_ev_ebitda_report.
        value_gap = (
            (intrinsic_value - market_cap) / market_cap * 100
            if market_cap > 0
            else 0.0
        )

        lines.append(f"{ticker}:")
        lines.append(f" Book Value: ${book_value:,.0f}")
        lines.append(f" Residual Income: ${initial_ri:,.0f}")
        # Bug fix: the MoS label was hard-coded to "20%" even when a row
        # overrides margin_of_safety; render the actual haircut. Output is
        # byte-identical for the 0.20 default.
        lines.append(
            f" Intrinsic Value (w/ {margin_of_safety:.0%} MoS): ${intrinsic_value:,.0f}",
        )
        lines.append(f" Value Gap: {value_gap:+.1f}%")
        lines.append("")

    return "\n".join(lines)
|
||||
|
||||
|
||||
def main() -> None:
    """Read normalized rows from stdin (JSON) and emit one selected report.

    Expects ``{"mode": "ev_ebitda" | "residual_income", "rows": [...],
    "current_date": "..."}``; raises ValueError for any other mode.
    """
    # Fix: use a plain import instead of the opaque __import__("sys") hack.
    import sys

    payload = json.load(sys.stdin)
    mode = payload["mode"]
    if mode == "ev_ebitda":
        print(build_ev_ebitda_report(payload["rows"], payload["current_date"]))
        return
    if mode == "residual_income":
        print(build_residual_income_report(payload["rows"], payload["current_date"]))
        return
    raise ValueError(f"Unsupported mode: {mode}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Deterministic owner earnings valuation helpers for the valuation_review skill."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
def build_owner_earnings_report(rows: Iterable[dict], current_date: str) -> str:
    """Render an owner earnings valuation report from normalized row inputs.

    Each row must provide ``ticker``, ``owner_earnings``, ``growth_rate`` and
    ``market_cap``. Optional overrides: ``required_return`` (default 0.15),
    ``margin_of_safety`` (default 0.25) and ``num_years`` (default 5).
    Rows carrying an ``error`` key are echoed as ``<ticker>: <error>`` and
    skipped instead of being valued.

    Returns:
        The multi-line report as a single string.
    """
    lines = [f"=== Owner Earnings Valuation ({current_date}) ===\n"]

    for row in rows:
        error = row.get("error")
        ticker = row["ticker"]
        if error:
            lines.append(f"{ticker}: {error}\n")
            continue

        owner_earnings = float(row["owner_earnings"])
        growth_rate = float(row["growth_rate"])
        market_cap = float(row["market_cap"])
        required_return = float(row.get("required_return", 0.15))
        margin_of_safety = float(row.get("margin_of_safety", 0.25))
        num_years = int(row.get("num_years", 5))

        # Present value of the explicit forecast horizon (years 1..num_years).
        pv_earnings = sum(
            owner_earnings
            * (1 + growth_rate) ** year
            / (1 + required_return) ** year
            for year in range(1, num_years + 1)
        )
        # Cap terminal growth at 3% so the Gordon terminal value stays finite
        # and conservative for high-growth inputs.
        terminal_growth = min(growth_rate, 0.03)
        terminal_earnings = (
            owner_earnings
            * (1 + growth_rate) ** num_years
            * (1 + terminal_growth)
        )
        terminal_value = terminal_earnings / (
            required_return - terminal_growth
        )
        pv_terminal = terminal_value / (1 + required_return) ** num_years
        intrinsic_value = (pv_earnings + pv_terminal) * (1 - margin_of_safety)
        value_gap = (intrinsic_value - market_cap) / market_cap * 100

        if value_gap > 20:
            assessment = "SIGNIFICANTLY UNDERVALUED"
        elif value_gap > 0:
            assessment = "POTENTIALLY UNDERVALUED"
        elif value_gap > -20:
            assessment = "POTENTIALLY OVERVALUED"
        else:
            assessment = "SIGNIFICANTLY OVERVALUED"

        lines.append(f"{ticker}:")
        lines.append(f" Owner Earnings: ${owner_earnings:,.0f}")
        lines.append(
            # BUG FIX: the label previously hard-coded "25%" even when a row
            # overrides margin_of_safety; render the value actually applied
            # (identical output for the 0.25 default).
            f" Intrinsic Value (w/ {margin_of_safety:.0%} MoS): ${intrinsic_value:,.0f}",
        )
        lines.append(f" Market Cap: ${market_cap:,.0f}")
        lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}")
        lines.append("")

    return "\n".join(lines)
|
||||
|
||||
|
||||
def main() -> None:
    """Read normalized rows from stdin and emit a text report.

    Expects a JSON payload shaped like
    ``{"rows": [...], "current_date": "..."}`` on stdin and prints the
    rendered owner earnings report.
    """
    # Import sys normally instead of the opaque __import__("sys") hack the
    # original used; behavior is identical but the dependency is explicit.
    import sys

    payload = json.load(sys.stdin)
    print(build_owner_earnings_report(payload["rows"], payload["current_date"]))


if __name__ == "__main__":
    main()
|
||||
21
backend/skills/customized/portfolio_decisioning/SKILL.md
Normal file
21
backend/skills/customized/portfolio_decisioning/SKILL.md
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: 组合决策
|
||||
description: 整合分析师观点与风险反馈,形成明确的组合层决策。
|
||||
---
|
||||
|
||||
# 组合决策
|
||||
|
||||
当你负责把团队分析转化为最终交易决策时,使用这个技能。
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 行动前先阅读分析师结论和风险警示。
|
||||
2. 评估当前组合、现金和保证金约束。
|
||||
3. 使用决策工具为每个 ticker 记录一个明确决策。
|
||||
4. 在全部决策记录完成后,总结组合层面的整体理由。
|
||||
|
||||
## 约束
|
||||
|
||||
- 仓位大小必须遵守资金和保证金限制。
|
||||
- 当分析师信心与风险信号不一致时,优先采用更小仓位。
|
||||
- 当任务要求完整决策清单时,不要让任何 ticker 处于未决状态。
|
||||
21
backend/skills/customized/risk_review/SKILL.md
Normal file
21
backend/skills/customized/risk_review/SKILL.md
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: 风险审查
|
||||
description: 在最终仓位和执行前,评估组合与市场风险。
|
||||
---
|
||||
|
||||
# 风险审查
|
||||
|
||||
当你需要识别集中度、波动率、杠杆和情景风险时,使用这个技能。
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 按 ticker 和主题检查拟议敞口。
|
||||
2. 识别集中度、波动率、流动性和杠杆方面的风险点。
|
||||
3. 按严重程度排序风险警示。
|
||||
4. 将风险结论转化为给投资经理的具体限制或注意事项。
|
||||
|
||||
## 约束
|
||||
|
||||
- 聚焦可执行的风险控制措施。
|
||||
- 当数据支持时尽量量化限制。
|
||||
- 明确区分致命阻断项和可管理风险。
|
||||
21
backend/skills/customized/sentiment_review/SKILL.md
Normal file
21
backend/skills/customized/sentiment_review/SKILL.md
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: 情绪分析
|
||||
description: 分析新闻流、市场心理和内幕行为,识别事件驱动型信号。
|
||||
---
|
||||
|
||||
# 情绪分析
|
||||
|
||||
当任务依赖近期催化剂、新闻语气或行为层面的市场信号时,使用这个技能。
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 回顾近期新闻并识别主导叙事。
|
||||
2. 检查内幕活动,寻找确认或冲突信号。
|
||||
3. 区分可持续的情绪变化和短暂噪音。
|
||||
4. 说明情绪如何改变短期交易展望。
|
||||
|
||||
## 约束
|
||||
|
||||
- 不要把注意力误判为真实信念。
|
||||
- 当情绪很强但缺乏基本面支持时,要明确指出。
|
||||
- 对催化剂时间窗口风险要说清楚。
|
||||
21
backend/skills/customized/technical_review/SKILL.md
Normal file
21
backend/skills/customized/technical_review/SKILL.md
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: 技术分析
|
||||
description: 评估价格行为、动量和波动率,用于判断时机和市场状态。
|
||||
---
|
||||
|
||||
# 技术分析
|
||||
|
||||
当任务对入场时机、趋势质量或短期市场结构敏感时,使用这个技能。
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 评估趋势方向和强度。
|
||||
2. 检查动量与均值回归条件。
|
||||
3. 在给出激进建议前先审视波动率。
|
||||
4. 将当前形态转化为带有明确风险意识的交易观点。
|
||||
|
||||
## 约束
|
||||
|
||||
- 区分趋势延续和过度透支。
|
||||
- 当信号冲突时避免给出高确定性判断。
|
||||
- 将波动率视为仓位输入,而不仅仅是方向输入。
|
||||
21
backend/skills/customized/valuation_review/SKILL.md
Normal file
21
backend/skills/customized/valuation_review/SKILL.md
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: 估值分析
|
||||
description: 使用多种估值视角评估合理价值和安全边际。
|
||||
---
|
||||
|
||||
# 估值分析
|
||||
|
||||
当任务需要判断一只股票是低估、高估还是定价合理时,使用这个技能。
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 条件允许时,使用不止一种估值方法。
|
||||
2. 对比内在价值估计与当前市场价格。
|
||||
3. 解释估值判断背后的关键假设。
|
||||
4. 明确安全边际,以及哪些因素会压缩或扩大它。
|
||||
|
||||
## 约束
|
||||
|
||||
- 将估值视为区间,而不是一个精确点值。
|
||||
- 明确说明假设敏感性。
|
||||
- 当输入稀疏或不稳定时,避免给出高置信度判断。
|
||||
191
backend/tests/test_agent_workspace.py
Normal file
191
backend/tests/test_agent_workspace.py
Normal file
@@ -0,0 +1,191 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from backend.agents.prompt_factory import build_agent_system_prompt
|
||||
from backend.agents.skills_manager import SkillsManager
|
||||
from backend.agents.workspace_manager import WorkspaceManager
|
||||
|
||||
|
||||
class _DummyToolkit:
    """Minimal toolkit stub for prompt tests: every hook contributes nothing."""

    def get_agent_skill_prompt(self):
        # No skill-prompt contribution in tests.
        return ""

    def get_activated_notes(self):
        # No activated-notes contribution in tests.
        return ""
|
||||
|
||||
|
||||
def test_workspace_manager_creates_extended_agent_files(tmp_path):
    """Bootstrapping a run scaffolds every per-agent workspace asset."""
    manager = WorkspaceManager(project_root=tmp_path)

    manager.initialize_default_assets(
        config_name="demo",
        agent_ids=["risk_manager"],
        analyst_personas={},
    )

    # Assets live under runs/<config>/agents/<agent>/.
    asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    assert (asset_dir / "SOUL.md").exists()
    assert (asset_dir / "PROFILE.md").exists()
    assert (asset_dir / "AGENTS.md").exists()
    assert (asset_dir / "MEMORY.md").exists()
    assert (asset_dir / "HEARTBEAT.md").exists()
    assert (asset_dir / "agent.yaml").exists()
    # Skill lifecycle directories: installed / active / disabled / local.
    assert (asset_dir / "skills" / "installed").is_dir()
    assert (asset_dir / "skills" / "active").is_dir()
    assert (asset_dir / "skills" / "disabled").is_dir()
    assert (asset_dir / "skills" / "local").is_dir()
|
||||
|
||||
|
||||
def test_agent_workspace_config_controls_prompt_files(tmp_path, monkeypatch):
    """prompt_files in agent.yaml whitelists which files feed the system prompt."""
    manager = WorkspaceManager(project_root=tmp_path)
    manager.initialize_default_assets(
        config_name="demo",
        agent_ids=["risk_manager"],
        analyst_personas={},
    )
    asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    (asset_dir / "SOUL.md").write_text("soul-line", encoding="utf-8")
    (asset_dir / "PROFILE.md").write_text("profile-line", encoding="utf-8")
    (asset_dir / "MEMORY.md").write_text("memory-line", encoding="utf-8")
    # Whitelist only SOUL.md and MEMORY.md; PROFILE.md must be excluded.
    (asset_dir / "agent.yaml").write_text(
        "prompt_files:\n"
        " - SOUL.md\n"
        " - MEMORY.md\n",
        encoding="utf-8",
    )

    from backend.agents import prompt_factory

    # Point the factory's SkillsManager at the temp project root.
    monkeypatch.setattr(
        prompt_factory,
        "SkillsManager",
        lambda: SkillsManager(project_root=tmp_path),
    )

    prompt = build_agent_system_prompt(
        agent_id="risk_manager",
        config_name="demo",
        toolkit=_DummyToolkit(),
    )

    assert "soul-line" in prompt
    assert "memory-line" in prompt
    assert "profile-line" not in prompt
|
||||
|
||||
|
||||
def test_skills_manager_applies_agent_level_skill_toggles(tmp_path):
    """enabled/disabled lists in agent.yaml override the agent's default skills."""
    # Two builtin skills: one default (risk_review), one opt-in (extra_guard).
    builtin_root = tmp_path / "backend" / "skills" / "builtin"
    for skill_name in ("risk_review", "extra_guard"):
        skill_dir = builtin_root / skill_name
        skill_dir.mkdir(parents=True, exist_ok=True)
        (skill_dir / "SKILL.md").write_text(
            f"# {skill_name}\n",
            encoding="utf-8",
        )

    manager = WorkspaceManager(project_root=tmp_path)
    manager.initialize_default_assets(
        config_name="demo",
        agent_ids=["risk_manager"],
        analyst_personas={},
    )
    asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    # Flip the toggles: enable the opt-in skill, disable the default one.
    (asset_dir / "agent.yaml").write_text(
        "enabled_skills:\n"
        " - extra_guard\n"
        "disabled_skills:\n"
        " - risk_review\n",
        encoding="utf-8",
    )

    skills_manager = SkillsManager(project_root=tmp_path)
    active_map = skills_manager.prepare_active_skills(
        config_name="demo",
        agent_defaults={"risk_manager": ["risk_review"]},
    )

    active_dirs = active_map["risk_manager"]
    assert [path.name for path in active_dirs] == ["extra_guard"]
    # Workspace mirrors the lifecycle: installed + active vs disabled copies.
    assert (asset_dir / "skills" / "installed" / "extra_guard" / "SKILL.md").exists()
    assert (asset_dir / "skills" / "active" / "extra_guard" / "SKILL.md").exists()
    assert (asset_dir / "skills" / "disabled" / "risk_review" / "SKILL.md").exists()
    assert not (asset_dir / "skills" / "active" / "risk_review").exists()
|
||||
|
||||
|
||||
def test_agent_local_skill_is_activated_from_agent_workspace(tmp_path):
    """A skill dropped into skills/local/ is activated without any yaml opt-in."""
    manager = WorkspaceManager(project_root=tmp_path)
    manager.initialize_default_assets(
        config_name="demo",
        agent_ids=["risk_manager"],
        analyst_personas={},
    )
    asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    local_skill = asset_dir / "skills" / "local" / "local_guard"
    local_skill.mkdir(parents=True, exist_ok=True)
    (local_skill / "SKILL.md").write_text(
        "---\nname: 本地风控\ndescription: local skill\nversion: 1.0.0\n---\n",
        encoding="utf-8",
    )

    skills_manager = SkillsManager(project_root=tmp_path)
    active_map = skills_manager.prepare_active_skills(
        config_name="demo",
        # No defaults: the only active skill must be the local one.
        agent_defaults={"risk_manager": []},
    )

    assert [path.name for path in active_map["risk_manager"]] == ["local_guard"]
    assert (asset_dir / "skills" / "active" / "local_guard" / "SKILL.md").exists()
|
||||
|
||||
|
||||
def test_prompt_includes_active_skill_metadata_summary(tmp_path, monkeypatch):
    """The prompt lists active skills' description/version but not tool wiring."""
    builtin_root = tmp_path / "backend" / "skills" / "builtin"
    skill_dir = builtin_root / "extra_guard"
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / "SKILL.md").write_text(
        "---\n"
        "name: extra_guard\n"
        "description: This skill should be used when the user asks to \"run a risk check\".\n"
        "version: 1.0.0\n"
        "tools:\n"
        " - risk_ops\n"
        "---\n\n"
        "# Extra Guard\n",
        encoding="utf-8",
    )

    manager = WorkspaceManager(project_root=tmp_path)
    manager.initialize_default_assets(
        config_name="demo",
        agent_ids=["risk_manager"],
        analyst_personas={},
    )
    asset_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    (asset_dir / "agent.yaml").write_text(
        "enabled_skills:\n"
        " - extra_guard\n",
        encoding="utf-8",
    )

    skills_manager = SkillsManager(project_root=tmp_path)
    skills_manager.prepare_active_skills(
        config_name="demo",
        agent_defaults={"risk_manager": []},
    )

    from backend.agents import prompt_factory

    # Point the factory's SkillsManager at the temp project root.
    monkeypatch.setattr(
        prompt_factory,
        "SkillsManager",
        lambda: SkillsManager(project_root=tmp_path),
    )

    prompt = build_agent_system_prompt(
        agent_id="risk_manager",
        config_name="demo",
        toolkit=_DummyToolkit(),
    )

    assert "Active Skill Catalog" in prompt
    assert "This skill should be used when the user asks to \"run a risk check\"." in prompt
    assert "version: 1.0.0" in prompt
    # Tool bindings are internal wiring and must not leak into the prompt.
    assert "risk_ops" not in prompt
|
||||
@@ -382,3 +382,341 @@ async def test_refresh_market_store_for_watchlist_emits_system_messages(monkeypa
|
||||
assert gateway.state_sync.system_messages[0] == "正在同步自选股市场数据: AAPL, MSFT"
|
||||
assert "自选股市场数据已同步:" in gateway.state_sync.system_messages[1]
|
||||
assert "AAPL prices=3 news=4" in gateway.state_sync.system_messages[1]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_get_agent_skills_returns_statuses(tmp_path):
    """get_agent_skills reports per-skill enabled/disabled status from agent.yaml."""
    builtin_root = tmp_path / "backend" / "skills" / "builtin"
    for name in ("risk_review", "extra_guard"):
        skill_dir = builtin_root / name
        skill_dir.mkdir(parents=True, exist_ok=True)
        (skill_dir / "SKILL.md").write_text(
            f"---\nname: {name}\ndescription: {name} desc\n---\n",
            encoding="utf-8",
        )

    agent_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    agent_dir.mkdir(parents=True, exist_ok=True)
    (agent_dir / "agent.yaml").write_text(
        "enabled_skills:\n"
        " - extra_guard\n"
        "disabled_skills:\n"
        " - risk_review\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    await gateway._handle_get_agent_skills(
        websocket,
        {"agent_id": "risk_manager"},
    )

    assert websocket.messages[-1]["type"] == "agent_skills_loaded"
    statuses = {
        row["skill_name"]: row["status"]
        for row in websocket.messages[-1]["skills"]
    }
    assert statuses["extra_guard"] == "enabled"
    assert statuses["risk_review"] == "disabled"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_get_agent_profile_returns_model_and_tool_groups(monkeypatch, tmp_path):
    """get_agent_profile merges agent.yaml overrides with model and tool-group info."""
    agent_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager"
    agent_dir.mkdir(parents=True, exist_ok=True)
    (agent_dir / "agent.yaml").write_text(
        "prompt_files:\n"
        " - SOUL.md\n"
        " - MEMORY.md\n"
        "active_tool_groups:\n"
        " - risk_ops\n"
        "disabled_tool_groups:\n"
        " - legacy_group\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    # The static profile grants both tool groups; agent.yaml disables legacy_group.
    monkeypatch.setattr(
        gateway_module,
        "load_agent_profiles",
        lambda: {"risk_manager": {"skills": ["risk_review"], "active_tool_groups": ["risk_ops", "legacy_group"]}},
    )
    monkeypatch.setattr(
        gateway_module,
        "get_agent_model_info",
        lambda agent_id: ("gpt-4o-mini", "OPENAI"),
    )

    class _Bootstrap:
        # Stand-in bootstrap config with no per-agent overrides.
        @staticmethod
        def agent_override(_agent_id):
            return {}

    monkeypatch.setattr(
        gateway_module,
        "get_bootstrap_config_for_run",
        lambda project_root, config_name: _Bootstrap(),
    )

    await gateway._handle_get_agent_profile(
        websocket,
        {"agent_id": "risk_manager"},
    )

    assert websocket.messages[-1]["type"] == "agent_profile_loaded"
    profile = websocket.messages[-1]["profile"]
    assert profile["model_name"] == "gpt-4o-mini"
    assert profile["model_provider"] == "OPENAI"
    assert profile["prompt_files"] == ["SOUL.md", "MEMORY.md"]
    # legacy_group was granted but is filtered out by the yaml disable list.
    assert profile["active_tool_groups"] == ["risk_ops"]
    assert profile["disabled_tool_groups"] == ["legacy_group"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_get_skill_detail_returns_markdown_body(tmp_path):
    """get_skill_detail parses frontmatter and returns the markdown body."""
    skill_dir = tmp_path / "backend" / "skills" / "builtin" / "risk_review"
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / "SKILL.md").write_text(
        "---\nname: 风险审查\ndescription: 说明\nversion: 1.0.0\n---\n# 风险审查\n\n完整正文\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    await gateway._handle_get_skill_detail(
        websocket,
        {"skill_name": "risk_review"},
    )

    assert websocket.messages[-1]["type"] == "skill_detail_loaded"
    assert websocket.messages[-1]["skill"]["name"] == "风险审查"
    assert websocket.messages[-1]["skill"]["version"] == "1.0.0"
    # Frontmatter is consumed; the body is returned without a trailing newline.
    assert websocket.messages[-1]["skill"]["content"] == "# 风险审查\n\n完整正文"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_get_skill_detail_prefers_agent_local_skill(tmp_path):
    """With an agent_id, a same-named local skill is served instead of a builtin."""
    skill_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "skills" / "local" / "local_guard"
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / "SKILL.md").write_text(
        "---\nname: 本地风控\ndescription: 本地说明\nversion: 1.0.0\n---\n# 本地风控\n\n本地正文\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    await gateway._handle_get_skill_detail(
        websocket,
        {"agent_id": "risk_manager", "skill_name": "local_guard"},
    )

    assert websocket.messages[-1]["type"] == "skill_detail_loaded"
    assert websocket.messages[-1]["agent_id"] == "risk_manager"
    # The detail is attributed to the agent's local workspace, not builtins.
    assert websocket.messages[-1]["skill"]["source"] == "local"
    assert websocket.messages[-1]["skill"]["content"] == "# 本地风控\n\n本地正文"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_update_agent_skill_persists_and_returns_refresh(monkeypatch, tmp_path):
    """Enabling a skill writes agent.yaml and replies with a refreshed skill list."""
    skill_dir = tmp_path / "backend" / "skills" / "builtin" / "extra_guard"
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / "SKILL.md").write_text(
        "---\nname: extra_guard\ndescription: desc\n---\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    # Skip the runtime-asset reload the update would otherwise trigger.
    async def _noop_reload():
        return None

    monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload)

    await gateway._handle_update_agent_skill(
        websocket,
        {
            "agent_id": "risk_manager",
            "skill_name": "extra_guard",
            "enabled": True,
        },
    )

    assert websocket.messages[0]["type"] == "agent_skill_updated"
    assert websocket.messages[-1]["type"] == "agent_skills_loaded"
    agent_yaml = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "agent.yaml"
    assert "extra_guard" in agent_yaml.read_text(encoding="utf-8")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_create_and_update_agent_local_skill(monkeypatch, tmp_path):
    """Creating then editing a local skill scaffolds SKILL.md and persists edits."""
    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    # Skip the runtime-asset reload each mutation would otherwise trigger.
    async def _noop_reload():
        return None

    monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload)

    await gateway._handle_create_agent_local_skill(
        websocket,
        {"agent_id": "risk_manager", "skill_name": "local_guard"},
    )

    # Creation replies created + refreshed list + detail, and scaffolds the file.
    assert websocket.messages[0]["type"] == "agent_local_skill_created"
    assert websocket.messages[1]["type"] == "agent_skills_loaded"
    assert websocket.messages[2]["type"] == "skill_detail_loaded"
    target = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "skills" / "local" / "local_guard" / "SKILL.md"
    assert target.exists()

    websocket.messages.clear()
    await gateway._handle_update_agent_local_skill(
        websocket,
        {
            "agent_id": "risk_manager",
            "skill_name": "local_guard",
            "content": "---\nname: 本地风控\ndescription: 更新后\nversion: 1.0.0\n---\n# 本地风控\n\n更新正文\n",
        },
    )

    assert websocket.messages[0]["type"] == "agent_local_skill_updated"
    assert websocket.messages[1]["type"] == "skill_detail_loaded"
    assert "更新正文" in target.read_text(encoding="utf-8")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_delete_agent_local_skill(monkeypatch, tmp_path):
    """Deleting a local skill removes its directory and scrubs agent.yaml refs."""
    skill_dir = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "skills" / "local" / "local_guard"
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / "SKILL.md").write_text(
        "---\nname: 本地风控\ndescription: desc\nversion: 1.0.0\n---\n",
        encoding="utf-8",
    )
    # Reference the skill from both toggle lists; deletion must purge both.
    agent_yaml = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "agent.yaml"
    agent_yaml.parent.mkdir(parents=True, exist_ok=True)
    agent_yaml.write_text(
        "enabled_skills:\n"
        " - local_guard\n"
        "disabled_skills:\n"
        " - local_guard\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    # Skip the runtime-asset reload the deletion would otherwise trigger.
    async def _noop_reload():
        return None

    monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload)

    await gateway._handle_delete_agent_local_skill(
        websocket,
        {"agent_id": "risk_manager", "skill_name": "local_guard"},
    )

    assert websocket.messages[0]["type"] == "agent_local_skill_deleted"
    assert websocket.messages[1]["type"] == "agent_skills_loaded"
    assert not skill_dir.exists()
    assert "local_guard" not in agent_yaml.read_text(encoding="utf-8")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_remove_agent_skill_marks_disabled(monkeypatch, tmp_path):
    """Removing a builtin skill records it as disabled rather than deleting files."""
    skill_dir = tmp_path / "backend" / "skills" / "builtin" / "risk_review"
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / "SKILL.md").write_text(
        "---\nname: 风险审查\ndescription: desc\nversion: 1.0.0\n---\n",
        encoding="utf-8",
    )

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    # Skip the runtime-asset reload the removal would otherwise trigger.
    async def _noop_reload():
        return None

    monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload)

    await gateway._handle_remove_agent_skill(
        websocket,
        {"agent_id": "risk_manager", "skill_name": "risk_review"},
    )

    assert websocket.messages[0]["type"] == "agent_skill_removed"
    assert websocket.messages[1]["type"] == "agent_skills_loaded"
    # The removal is persisted into agent.yaml (e.g. as a disabled entry).
    agent_yaml = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "agent.yaml"
    assert "risk_review" in agent_yaml.read_text(encoding="utf-8")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_get_agent_workspace_file_returns_content(tmp_path):
    """get_agent_workspace_file returns the exact on-disk file content."""
    file_path = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "SOUL.md"
    file_path.parent.mkdir(parents=True, exist_ok=True)
    file_path.write_text("soul content", encoding="utf-8")

    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    await gateway._handle_get_agent_workspace_file(
        websocket,
        {"agent_id": "risk_manager", "filename": "SOUL.md"},
    )

    # Full payload equality pins the message schema, not just the content.
    assert websocket.messages[-1] == {
        "type": "agent_workspace_file_loaded",
        "config_name": "demo",
        "agent_id": "risk_manager",
        "filename": "SOUL.md",
        "content": "soul content",
    }
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_handle_update_agent_workspace_file_persists_and_returns_refresh(monkeypatch, tmp_path):
    """Updating a workspace file writes it to disk and echoes the fresh content."""
    gateway = make_gateway()
    gateway.config["config_name"] = "demo"
    gateway._project_root = tmp_path
    websocket = DummyWebSocket()

    # Skip the runtime-asset reload the update would otherwise trigger.
    async def _noop_reload():
        return None

    monkeypatch.setattr(gateway, "_handle_reload_runtime_assets", _noop_reload)

    await gateway._handle_update_agent_workspace_file(
        websocket,
        {
            "agent_id": "risk_manager",
            "filename": "SOUL.md",
            "content": "updated soul",
        },
    )

    assert websocket.messages[0]["type"] == "agent_workspace_file_updated"
    assert websocket.messages[-1]["type"] == "agent_workspace_file_loaded"
    target = tmp_path / "runs" / "demo" / "agents" / "risk_manager" / "SOUL.md"
    assert target.read_text(encoding="utf-8") == "updated soul"
|
||||
|
||||
72
backend/tests/test_skills_cli.py
Normal file
72
backend/tests/test_skills_cli.py
Normal file
@@ -0,0 +1,72 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from backend import cli
|
||||
from backend.agents.skill_metadata import parse_skill_metadata
|
||||
from backend.agents.skills_manager import SkillsManager
|
||||
|
||||
|
||||
def test_parse_skill_metadata_extended_frontmatter(tmp_path):
    """Frontmatter name/description/tools are parsed from SKILL.md."""
    skill_dir = tmp_path / "demo_skill"
    skill_dir.mkdir(parents=True, exist_ok=True)
    frontmatter = (
        "---\n"
        "name: demo_skill\n"
        "description: Demo description\n"
        "tools:\n"
        " - technical\n"
        "---\n\n"
        "# Demo Skill\n"
    )
    (skill_dir / "SKILL.md").write_text(frontmatter, encoding="utf-8")

    parsed = parse_skill_metadata(skill_dir, source="builtin")

    assert parsed.skill_name == "demo_skill"
    assert parsed.description == "Demo description"
    assert parsed.tools == ["technical"]
|
||||
|
||||
|
||||
def test_update_agent_skill_overrides(tmp_path):
    """Enable/disable updates merge into the existing agent.yaml override lists."""
    manager = SkillsManager(project_root=tmp_path)
    asset_dir = manager.get_agent_asset_dir("demo", "risk_manager")
    asset_dir.mkdir(parents=True, exist_ok=True)
    (asset_dir / "agent.yaml").write_text(
        "enabled_skills:\n"
        " - risk_review\n"
        "disabled_skills:\n"
        " - old_skill\n",
        encoding="utf-8",
    )

    result = manager.update_agent_skill_overrides(
        config_name="demo",
        agent_id="risk_manager",
        enable=["extra_guard"],
        disable=["risk_review"],
    )

    # risk_review migrates from enabled to disabled; old_skill is preserved.
    assert result["enabled_skills"] == ["extra_guard"]
    assert result["disabled_skills"] == ["old_skill", "risk_review"]
|
||||
|
||||
|
||||
def test_skills_enable_disable_and_list(monkeypatch, tmp_path):
    """CLI enable/disable/list commands run end to end and print a catalog."""
    builtin_root = tmp_path / "backend" / "skills" / "builtin"
    for name in ("risk_review", "extra_guard"):
        skill_dir = builtin_root / name
        skill_dir.mkdir(parents=True, exist_ok=True)
        (skill_dir / "SKILL.md").write_text(
            f"---\nname: {name}\ndescription: {name} desc\n---\n",
            encoding="utf-8",
        )

    # Capture console output instead of printing to the terminal.
    printed = []
    monkeypatch.setattr(cli, "get_project_root", lambda: tmp_path)
    monkeypatch.setattr(cli.console, "print", lambda value: printed.append(value))

    cli.skills_enable(agent_id="risk_manager", skill="extra_guard", config_name="demo")
    cli.skills_disable(agent_id="risk_manager", skill="risk_review", config_name="demo")
    cli.skills_list(config_name="demo", agent_id="risk_manager")

    text_dump = "\n".join(str(item) for item in printed)
    assert "Enabled" in text_dump
    assert "Disabled" in text_dump
    # The list command emits a renderable whose title is "Skill Catalog".
    assert any(getattr(item, "title", None) == "Skill Catalog" for item in printed)
|
||||
106
backend/tests/test_valuation_scripts.py
Normal file
106
backend/tests/test_valuation_scripts.py
Normal file
@@ -0,0 +1,106 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from backend.agents.skills_manager import SkillsManager
|
||||
from backend.skills.builtin.valuation_review.scripts.dcf_report import (
|
||||
build_dcf_report,
|
||||
)
|
||||
from backend.skills.builtin.valuation_review.scripts.multiple_valuation_report import (
|
||||
build_ev_ebitda_report,
|
||||
build_residual_income_report,
|
||||
)
|
||||
from backend.skills.builtin.valuation_review.scripts.owner_earnings_report import (
|
||||
build_owner_earnings_report,
|
||||
)
|
||||
|
||||
|
||||
def test_build_dcf_report_renders_assessment():
    """A single well-formed row yields a dated header plus per-ticker sections."""
    row = {
        "ticker": "AAPL",
        "current_fcf": 100.0,
        "growth_rate": 0.05,
        "market_cap": 900.0,
        "discount_rate": 0.10,
        "terminal_growth": 0.03,
        "num_years": 5,
    }

    report = build_dcf_report([row], "2026-03-17")

    for expected in (
        "DCF Valuation Analysis (2026-03-17)",
        "AAPL:",
        "Market Cap: $900",
        "Value Gap:",
    ):
        assert expected in report
|
||||
|
||||
|
||||
def test_build_owner_earnings_report_handles_errors():
    """An error row is echoed as '<ticker>: <error>' instead of being valued."""
    failing_row = {"ticker": "MSFT", "error": "Negative owner earnings ($-50)"}

    rendered = build_owner_earnings_report([failing_row], "2026-03-17")

    assert "MSFT: Negative owner earnings ($-50)" in rendered
|
||||
|
||||
|
||||
def test_multiple_valuation_reports_render_expected_sections():
    """EV/EBITDA and residual-income reports render dated headers and tickers."""
    ev_report = build_ev_ebitda_report(
        [
            {
                "ticker": "NVDA",
                "current_multiple": 18.0,
                "median_multiple": 20.0,
                "current_ebitda": 50.0,
                "market_cap": 800.0,
                "net_debt": 100.0,
            },
        ],
        "2026-03-17",
    )
    residual_report = build_residual_income_report(
        [
            {
                "ticker": "META",
                "book_value": 200.0,
                "initial_ri": 30.0,
                "market_cap": 300.0,
                "cost_of_equity": 0.10,
                "bv_growth": 0.03,
                "terminal_growth": 0.03,
                "num_years": 5,
                "margin_of_safety": 0.20,
            },
        ],
        "2026-03-17",
    )

    assert "EV/EBITDA Valuation (2026-03-17)" in ev_report
    assert "NVDA:" in ev_report
    assert "Residual Income Valuation (2026-03-17)" in residual_report
    assert "META:" in residual_report
|
||||
|
||||
|
||||
def test_prepare_active_skills_copies_skill_scripts(tmp_path):
    """Activating a skill copies its scripts/ directory into the active copy."""
    builtin_skill = tmp_path / "backend" / "skills" / "builtin" / "valuation_review"
    scripts_dir = builtin_skill / "scripts"
    scripts_dir.mkdir(parents=True, exist_ok=True)
    (builtin_skill / "SKILL.md").write_text(
        "---\nname: 估值分析\ndescription: desc\nversion: 1.0.0\n---\n",
        encoding="utf-8",
    )
    (scripts_dir / "dcf_report.py").write_text("print('ok')\n", encoding="utf-8")

    manager = SkillsManager(project_root=tmp_path)
    active_map = manager.prepare_active_skills(
        config_name="demo",
        agent_defaults={"valuation_analyst": ["valuation_review"]},
    )

    active_dir = active_map["valuation_analyst"][0]
    assert (active_dir / "scripts" / "dcf_report.py").exists()
|
||||
@@ -22,6 +22,16 @@ from agentscope.message import TextBlock
|
||||
from agentscope.tool import ToolResponse
|
||||
|
||||
from backend.data.provider_utils import normalize_symbol
|
||||
from backend.skills.builtin.valuation_review.scripts.dcf_report import (
|
||||
build_dcf_report,
|
||||
)
|
||||
from backend.skills.builtin.valuation_review.scripts.multiple_valuation_report import (
|
||||
build_ev_ebitda_report,
|
||||
build_residual_income_report,
|
||||
)
|
||||
from backend.skills.builtin.valuation_review.scripts.owner_earnings_report import (
|
||||
build_owner_earnings_report,
|
||||
)
|
||||
from backend.tools.data_tools import (
|
||||
get_company_news,
|
||||
get_financial_metrics,
|
||||
@@ -814,7 +824,7 @@ def dcf_valuation_analysis(
|
||||
|
||||
current_date = _resolved_date(current_date)
|
||||
tickers = _parse_tickers(tickers)
|
||||
lines = [f"=== DCF Valuation Analysis ({current_date}) ===\n"]
|
||||
rows = []
|
||||
|
||||
for ticker in tickers:
|
||||
metrics = get_financial_metrics(
|
||||
@@ -823,7 +833,7 @@ def dcf_valuation_analysis(
|
||||
limit=8,
|
||||
)
|
||||
if not metrics:
|
||||
lines.append(f"{ticker}: No financial metrics\n")
|
||||
rows.append({"ticker": ticker, "error": "No financial metrics"})
|
||||
continue
|
||||
|
||||
line_items = search_line_items(
|
||||
@@ -838,56 +848,28 @@ def dcf_valuation_analysis(
|
||||
or not line_items[0].free_cash_flow
|
||||
or line_items[0].free_cash_flow <= 0
|
||||
):
|
||||
lines.append(f"{ticker}: Invalid free cash flow data\n")
|
||||
rows.append({"ticker": ticker, "error": "Invalid free cash flow data"})
|
||||
continue
|
||||
|
||||
market_cap = get_market_cap(ticker, current_date)
|
||||
if not market_cap:
|
||||
lines.append(f"{ticker}: Market cap unavailable\n")
|
||||
rows.append({"ticker": ticker, "error": "Market cap unavailable"})
|
||||
continue
|
||||
|
||||
m = metrics[0]
|
||||
current_fcf = line_items[0].free_cash_flow
|
||||
growth_rate = m.earnings_growth or 0.05
|
||||
discount_rate = 0.10
|
||||
terminal_growth = 0.03
|
||||
num_years = 5
|
||||
|
||||
# DCF calculation
|
||||
pv_fcf = sum(
|
||||
current_fcf
|
||||
* (1 + growth_rate) ** year
|
||||
/ (1 + discount_rate) ** year
|
||||
for year in range(1, num_years + 1)
|
||||
rows.append(
|
||||
{
|
||||
"ticker": ticker,
|
||||
"current_fcf": line_items[0].free_cash_flow,
|
||||
"growth_rate": m.earnings_growth or 0.05,
|
||||
"market_cap": market_cap,
|
||||
"discount_rate": 0.10,
|
||||
"terminal_growth": 0.03,
|
||||
"num_years": 5,
|
||||
},
|
||||
)
|
||||
terminal_fcf = (
|
||||
current_fcf
|
||||
* (1 + growth_rate) ** num_years
|
||||
* (1 + terminal_growth)
|
||||
)
|
||||
terminal_value = terminal_fcf / (discount_rate - terminal_growth)
|
||||
pv_terminal = terminal_value / (1 + discount_rate) ** num_years
|
||||
enterprise_value = pv_fcf + pv_terminal
|
||||
value_gap = (enterprise_value - market_cap) / market_cap * 100
|
||||
|
||||
# Assessment
|
||||
if value_gap > 20:
|
||||
assessment = "SIGNIFICANTLY UNDERVALUED"
|
||||
elif value_gap > 0:
|
||||
assessment = "POTENTIALLY UNDERVALUED"
|
||||
elif value_gap > -20:
|
||||
assessment = "POTENTIALLY OVERVALUED"
|
||||
else:
|
||||
assessment = "SIGNIFICANTLY OVERVALUED"
|
||||
|
||||
lines.append(f"{ticker}:")
|
||||
lines.append(f" Current FCF: ${current_fcf:,.0f}")
|
||||
lines.append(f" DCF Enterprise Value: ${enterprise_value:,.0f}")
|
||||
lines.append(f" Market Cap: ${market_cap:,.0f}")
|
||||
lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}")
|
||||
lines.append("")
|
||||
|
||||
return _to_text_response("\n".join(lines))
|
||||
return _to_text_response(build_dcf_report(rows, current_date))
|
||||
|
||||
|
||||
@safe
|
||||
@@ -911,7 +893,7 @@ def owner_earnings_valuation_analysis(
|
||||
|
||||
current_date = _resolved_date(current_date)
|
||||
tickers = _parse_tickers(tickers)
|
||||
lines = [f"=== Owner Earnings Valuation ({current_date}) ===\n"]
|
||||
rows = []
|
||||
|
||||
for ticker in tickers:
|
||||
metrics = get_financial_metrics(
|
||||
@@ -920,7 +902,7 @@ def owner_earnings_valuation_analysis(
|
||||
limit=8,
|
||||
)
|
||||
if not metrics:
|
||||
lines.append(f"{ticker}: No financial metrics\n")
|
||||
rows.append({"ticker": ticker, "error": "No financial metrics"})
|
||||
continue
|
||||
|
||||
line_items = search_line_items(
|
||||
@@ -936,12 +918,12 @@ def owner_earnings_valuation_analysis(
|
||||
limit=2,
|
||||
)
|
||||
if len(line_items) < 2:
|
||||
lines.append(f"{ticker}: Insufficient financial data\n")
|
||||
rows.append({"ticker": ticker, "error": "Insufficient financial data"})
|
||||
continue
|
||||
|
||||
market_cap = get_market_cap(ticker, current_date)
|
||||
if not market_cap:
|
||||
lines.append(f"{ticker}: Market cap unavailable\n")
|
||||
rows.append({"ticker": ticker, "error": "Market cap unavailable"})
|
||||
continue
|
||||
|
||||
m = metrics[0]
|
||||
@@ -956,57 +938,27 @@ def owner_earnings_valuation_analysis(
|
||||
|
||||
owner_earnings = net_income + depreciation - capex - wc_change
|
||||
if owner_earnings <= 0:
|
||||
lines.append(
|
||||
f"{ticker}: Negative owner earnings (${owner_earnings:,.0f})\n",
|
||||
rows.append(
|
||||
{
|
||||
"ticker": ticker,
|
||||
"error": f"Negative owner earnings (${owner_earnings:,.0f})",
|
||||
},
|
||||
)
|
||||
continue
|
||||
|
||||
# Valuation
|
||||
growth_rate = m.earnings_growth or 0.05
|
||||
required_return = 0.15
|
||||
margin_of_safety = 0.25
|
||||
num_years = 5
|
||||
|
||||
pv_earnings = sum(
|
||||
owner_earnings
|
||||
* (1 + growth_rate) ** year
|
||||
/ (1 + required_return) ** year
|
||||
for year in range(1, num_years + 1)
|
||||
rows.append(
|
||||
{
|
||||
"ticker": ticker,
|
||||
"owner_earnings": owner_earnings,
|
||||
"growth_rate": m.earnings_growth or 0.05,
|
||||
"market_cap": market_cap,
|
||||
"required_return": 0.15,
|
||||
"margin_of_safety": 0.25,
|
||||
"num_years": 5,
|
||||
},
|
||||
)
|
||||
terminal_growth = min(growth_rate, 0.03)
|
||||
terminal_earnings = (
|
||||
owner_earnings
|
||||
* (1 + growth_rate) ** num_years
|
||||
* (1 + terminal_growth)
|
||||
)
|
||||
terminal_value = terminal_earnings / (
|
||||
required_return - terminal_growth
|
||||
)
|
||||
pv_terminal = terminal_value / (1 + required_return) ** num_years
|
||||
|
||||
intrinsic_value = (pv_earnings + pv_terminal) * (1 - margin_of_safety)
|
||||
value_gap = (intrinsic_value - market_cap) / market_cap * 100
|
||||
|
||||
# Assessment
|
||||
if value_gap > 20:
|
||||
assessment = "SIGNIFICANTLY UNDERVALUED"
|
||||
elif value_gap > 0:
|
||||
assessment = "POTENTIALLY UNDERVALUED"
|
||||
elif value_gap > -20:
|
||||
assessment = "POTENTIALLY OVERVALUED"
|
||||
else:
|
||||
assessment = "SIGNIFICANTLY OVERVALUED"
|
||||
|
||||
lines.append(f"{ticker}:")
|
||||
lines.append(f" Owner Earnings: ${owner_earnings:,.0f}")
|
||||
lines.append(
|
||||
f" Intrinsic Value (w/ 25% MoS): ${intrinsic_value:,.0f}",
|
||||
)
|
||||
lines.append(f" Market Cap: ${market_cap:,.0f}")
|
||||
lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}")
|
||||
lines.append("")
|
||||
|
||||
return _to_text_response("\n".join(lines))
|
||||
return _to_text_response(build_owner_earnings_report(rows, current_date))
|
||||
|
||||
|
||||
@safe
|
||||
@@ -1030,7 +982,7 @@ def ev_ebitda_valuation_analysis(
|
||||
|
||||
current_date = _resolved_date(current_date)
|
||||
tickers = _parse_tickers(tickers)
|
||||
lines = [f"=== EV/EBITDA Valuation ({current_date}) ===\n"]
|
||||
rows = []
|
||||
|
||||
for ticker in tickers:
|
||||
metrics = get_financial_metrics(
|
||||
@@ -1039,7 +991,7 @@ def ev_ebitda_valuation_analysis(
|
||||
limit=8,
|
||||
)
|
||||
if not metrics:
|
||||
lines.append(f"{ticker}: No financial metrics\n")
|
||||
rows.append({"ticker": ticker, "error": "No financial metrics"})
|
||||
continue
|
||||
|
||||
m = metrics[0]
|
||||
@@ -1048,12 +1000,12 @@ def ev_ebitda_valuation_analysis(
|
||||
or not m.enterprise_value_to_ebitda_ratio
|
||||
or m.enterprise_value_to_ebitda_ratio <= 0
|
||||
):
|
||||
lines.append(f"{ticker}: Missing EV/EBITDA data\n")
|
||||
rows.append({"ticker": ticker, "error": "Missing EV/EBITDA data"})
|
||||
continue
|
||||
|
||||
market_cap = get_market_cap(ticker, current_date)
|
||||
if not market_cap:
|
||||
lines.append(f"{ticker}: Market cap unavailable\n")
|
||||
rows.append({"ticker": ticker, "error": "Market cap unavailable"})
|
||||
continue
|
||||
|
||||
current_ebitda = (
|
||||
@@ -1067,42 +1019,21 @@ def ev_ebitda_valuation_analysis(
|
||||
and x.enterprise_value_to_ebitda_ratio > 0
|
||||
]
|
||||
if len(valid_multiples) < 3:
|
||||
lines.append(f"{ticker}: Insufficient historical data\n")
|
||||
rows.append({"ticker": ticker, "error": "Insufficient historical data"})
|
||||
continue
|
||||
|
||||
median_multiple = median(valid_multiples)
|
||||
current_multiple = m.enterprise_value_to_ebitda_ratio
|
||||
|
||||
implied_ev = median_multiple * current_ebitda
|
||||
net_debt = m.enterprise_value - market_cap
|
||||
implied_equity = max(implied_ev - net_debt, 0)
|
||||
|
||||
value_gap = (
|
||||
(implied_equity - market_cap) / market_cap * 100
|
||||
if market_cap > 0
|
||||
else 0
|
||||
)
|
||||
multiple_discount = (
|
||||
(median_multiple - current_multiple) / median_multiple * 100
|
||||
rows.append(
|
||||
{
|
||||
"ticker": ticker,
|
||||
"current_multiple": m.enterprise_value_to_ebitda_ratio,
|
||||
"median_multiple": median(valid_multiples),
|
||||
"current_ebitda": current_ebitda,
|
||||
"market_cap": market_cap,
|
||||
"net_debt": m.enterprise_value - market_cap,
|
||||
},
|
||||
)
|
||||
|
||||
# Assessment
|
||||
if multiple_discount > 10:
|
||||
assessment = "TRADING BELOW HISTORICAL MULTIPLE"
|
||||
elif multiple_discount > -10:
|
||||
assessment = "NEAR HISTORICAL AVERAGE"
|
||||
else:
|
||||
assessment = "TRADING ABOVE HISTORICAL MULTIPLE"
|
||||
|
||||
lines.append(f"{ticker}:")
|
||||
lines.append(f" Current EV/EBITDA: {current_multiple:.1f}x")
|
||||
lines.append(f" Historical Median: {median_multiple:.1f}x")
|
||||
lines.append(f" Multiple vs History: {multiple_discount:+.1f}%")
|
||||
lines.append(f" Implied Equity Value: ${implied_equity:,.0f}")
|
||||
lines.append(f" Value Gap: {value_gap:+.1f}% -> {assessment}")
|
||||
lines.append("")
|
||||
|
||||
return _to_text_response("\n".join(lines))
|
||||
return _to_text_response(build_ev_ebitda_report(rows, current_date))
|
||||
|
||||
|
||||
@safe
|
||||
@@ -1126,7 +1057,7 @@ def residual_income_valuation_analysis(
|
||||
|
||||
current_date = _resolved_date(current_date)
|
||||
tickers = _parse_tickers(tickers)
|
||||
lines = [f"=== Residual Income Valuation ({current_date}) ===\n"]
|
||||
rows = []
|
||||
|
||||
for ticker in tickers:
|
||||
metrics = get_financial_metrics(
|
||||
@@ -1135,7 +1066,7 @@ def residual_income_valuation_analysis(
|
||||
limit=8,
|
||||
)
|
||||
if not metrics:
|
||||
lines.append(f"{ticker}: No financial metrics\n")
|
||||
rows.append({"ticker": ticker, "error": "No financial metrics"})
|
||||
continue
|
||||
|
||||
line_items = search_line_items(
|
||||
@@ -1146,59 +1077,44 @@ def residual_income_valuation_analysis(
|
||||
limit=1,
|
||||
)
|
||||
if not line_items or not line_items[0].net_income:
|
||||
lines.append(f"{ticker}: No net income data\n")
|
||||
rows.append({"ticker": ticker, "error": "No net income data"})
|
||||
continue
|
||||
|
||||
market_cap = get_market_cap(ticker, current_date)
|
||||
if not market_cap:
|
||||
lines.append(f"{ticker}: Market cap unavailable\n")
|
||||
rows.append({"ticker": ticker, "error": "Market cap unavailable"})
|
||||
continue
|
||||
|
||||
m = metrics[0]
|
||||
if not m.price_to_book_ratio or m.price_to_book_ratio <= 0:
|
||||
lines.append(f"{ticker}: Invalid P/B ratio\n")
|
||||
rows.append({"ticker": ticker, "error": "Invalid P/B ratio"})
|
||||
continue
|
||||
|
||||
net_income = line_items[0].net_income
|
||||
pb_ratio = m.price_to_book_ratio
|
||||
book_value = market_cap / pb_ratio
|
||||
|
||||
# Model parameters
|
||||
cost_of_equity = 0.10
|
||||
bv_growth = m.book_value_growth or 0.03
|
||||
terminal_growth = 0.03
|
||||
num_years = 5
|
||||
margin_of_safety = 0.20
|
||||
|
||||
initial_ri = net_income - cost_of_equity * book_value
|
||||
if initial_ri <= 0:
|
||||
lines.append(f"{ticker}: Negative residual income\n")
|
||||
rows.append({"ticker": ticker, "error": "Negative residual income"})
|
||||
continue
|
||||
|
||||
# PV calculation
|
||||
pv_ri = sum(
|
||||
initial_ri * (1 + bv_growth) ** year / (1 + cost_of_equity) ** year
|
||||
for year in range(1, num_years + 1)
|
||||
rows.append(
|
||||
{
|
||||
"ticker": ticker,
|
||||
"book_value": book_value,
|
||||
"initial_ri": initial_ri,
|
||||
"market_cap": market_cap,
|
||||
"cost_of_equity": cost_of_equity,
|
||||
"bv_growth": m.book_value_growth or 0.03,
|
||||
"terminal_growth": 0.03,
|
||||
"num_years": 5,
|
||||
"margin_of_safety": 0.20,
|
||||
},
|
||||
)
|
||||
terminal_ri = initial_ri * (1 + bv_growth) ** (num_years + 1)
|
||||
terminal_value = terminal_ri / (cost_of_equity - terminal_growth)
|
||||
pv_terminal = terminal_value / (1 + cost_of_equity) ** num_years
|
||||
|
||||
intrinsic_value = (book_value + pv_ri + pv_terminal) * (
|
||||
1 - margin_of_safety
|
||||
)
|
||||
value_gap = (intrinsic_value - market_cap) / market_cap * 100
|
||||
|
||||
lines.append(f"{ticker}:")
|
||||
lines.append(f" Book Value: ${book_value:,.0f}")
|
||||
lines.append(f" Residual Income: ${initial_ri:,.0f}")
|
||||
lines.append(
|
||||
f" Intrinsic Value (w/ 20% MoS): ${intrinsic_value:,.0f}",
|
||||
)
|
||||
lines.append(f" Value Gap: {value_gap:+.1f}%")
|
||||
lines.append("")
|
||||
|
||||
return _to_text_response("\n".join(lines))
|
||||
return _to_text_response(build_residual_income_report(rows, current_date))
|
||||
|
||||
|
||||
# Tool Registry for dynamic toolkit creation
|
||||
|
||||
Reference in New Issue
Block a user