"""Custom Agent Example for OpenClaw Trading.
|
|
|
|
This example demonstrates how to create a custom trading agent by inheriting
|
|
from the BaseAgent class. The custom agent implements specific analysis logic
|
|
and decision-making behavior.
|
|
|
|
To run:
|
|
python examples/custom_agent.py
|
|
"""
|
|
|
|
import asyncio
|
|
import random
|
|
from typing import Any, Dict
|
|
|
|
from openclaw.agents.base import ActivityType, BaseAgent
|
|
from openclaw.utils.logging import get_logger
|
|
|
|
|
|
class MomentumAgent(BaseAgent):
    """A custom momentum-based trading agent.

    This agent uses simple momentum indicators to make trading decisions.
    It demonstrates how to extend the BaseAgent class with custom logic.

    Args:
        agent_id: Unique identifier for this agent
        initial_capital: Starting balance for the agent
        momentum_threshold: Threshold for momentum signals (default: 0.05)
    """

    def __init__(
        self,
        agent_id: str,
        initial_capital: float,
        momentum_threshold: float = 0.05,
    ):
        super().__init__(
            agent_id=agent_id,
            initial_capital=initial_capital,
            skill_level=0.6,  # Momentum agents start with decent skills
        )
        self.momentum_threshold = momentum_threshold
        # NOTE(review): never read or written by this class — presumably a
        # placeholder for real price-history tracking; confirm before removing.
        self.recent_prices: Dict[str, list] = {}
        self.logger = get_logger(f"agents.{agent_id}")
        self.logger.info(
            f"MomentumAgent created with threshold={momentum_threshold:.1%}"
        )

    async def decide_activity(self) -> ActivityType:
        """Decide what activity to perform based on economic status.

        The agent will:
        1. Trade if it has sufficient capital and good win rate
        2. Learn if skill level is low
        3. Rest if balance is critically low

        Returns:
            The chosen ActivityType for this cycle.
        """
        # Bankrupt agents can only rest.
        if not self.check_survival():
            return ActivityType.REST

        # If skill level is low, prioritize learning (requires some balance).
        if self.skill_level < 0.4 and self.balance > 200:
            return ActivityType.LEARN

        # If win rate is good and we have capital, trade for real.
        if self.win_rate > 0.5 and self.balance > 500:
            return ActivityType.TRADE

        # Default to paper trading to practice
        return ActivityType.PAPER_TRADE

    async def analyze(self, symbol: str) -> Dict[str, Any]:
        """Analyze a symbol using momentum indicators.

        This is a simplified analysis that would typically use real market data.
        In production, this would calculate actual momentum from price history.

        Args:
            symbol: The trading symbol to analyze (e.g., "AAPL")

        Returns:
            Analysis results with signal and confidence
        """
        # Simulate momentum calculation.
        # In real implementation, this would use actual price data.
        momentum = random.uniform(-0.15, 0.15)

        # Determine signal based on momentum threshold
        if momentum > self.momentum_threshold:
            signal = "BUY"
            confidence = min(1.0, momentum * 5)  # Scale confidence
        elif momentum < -self.momentum_threshold:
            signal = "SELL"
            confidence = min(1.0, abs(momentum) * 5)
        else:
            signal = "HOLD"
            confidence = 0.5

        # Boost confidence based on agent skill level (0.8–1.0 multiplier).
        confidence = min(1.0, confidence * (0.8 + 0.2 * self.skill_level))

        result = {
            "symbol": symbol,
            "signal": signal,
            "confidence": confidence,
            "momentum": momentum,
            "threshold": self.momentum_threshold,
            "agent_skill": self.skill_level,
        }

        self.logger.info(
            f"Analysis for {symbol}: {signal} (confidence: {confidence:.1%})"
        )

        return result

    def simulate_trade(
        self, symbol: str, signal: str, confidence: float
    ) -> Dict[str, Any]:
        """Simulate executing a trade based on analysis signal.

        Args:
            symbol: Trading symbol
            signal: Trade signal (BUY, SELL, HOLD)
            confidence: Confidence level in the signal

        Returns:
            Trade result information
        """
        if signal == "HOLD":
            return {"action": "HOLD", "pnl": 0.0, "executed": False}

        # Simulate trade outcome based on confidence and some randomness
        win_probability = confidence * 0.7 + (self.skill_level * 0.3)
        is_win = random.random() < win_probability

        # Calculate PnL
        trade_amount = min(self.balance * 0.1, 1000)  # Risk 10% or max $1000
        pnl = trade_amount * 0.05 if is_win else -trade_amount * 0.03

        # Record the trade
        self.record_trade(is_win=is_win, pnl=pnl)

        # Pay for the trade.
        # Fixed: removed an unused local import of TradingEconomicTracker —
        # the tracker instance already lives on self.economic_tracker.
        self.economic_tracker.calculate_trade_cost(
            trade_value=trade_amount,
            is_win=is_win,
            win_amount=pnl if is_win else 0,
        )

        return {
            "action": signal,
            "symbol": symbol,
            "amount": trade_amount,
            "pnl": pnl,
            "is_win": is_win,
            "executed": True,
        }
|
|
|
|
|
|
async def main():
    """Run the custom agent example.

    Builds a MomentumAgent, wires up event hooks, runs a short simulated
    trading loop, and prints a final status summary.
    """
    print("=" * 60)
    print("OpenClaw Trading - Custom Agent Example")
    print("=" * 60)

    # Create a custom momentum agent
    print("\n1. Creating custom momentum agent...")
    agent = MomentumAgent(
        agent_id="momentum_001",
        initial_capital=5000.0,
        momentum_threshold=0.05,
    )
    print(f"   Agent: {agent}")

    # Register event hooks.
    # NOTE(review): the callback parameter shadows the outer `agent`
    # variable; both refer to the same object when the hook fires here.
    def on_trade_callback(agent, **kwargs):
        print(f"  [Event] Trade completed: {'WIN' if kwargs.get('is_win') else 'LOSS'}")

    def on_level_up_callback(agent, **kwargs):
        # Fixed: dropped the redundant f-prefix (string has no placeholders).
        print("  [Event] Agent leveled up!")

    agent.register_hook("on_trade", on_trade_callback)
    agent.register_hook("on_level_up", on_level_up_callback)

    # Run simulation
    print("\n2. Running trading simulation...")
    symbols = ["AAPL", "GOOGL", "MSFT", "TSLA", "NVDA"]

    for i in range(10):
        print(f"\n  --- Iteration {i + 1} ---")

        # Decide activity
        activity = await agent.decide_activity()
        print(f"  Activity: {activity.value}")

        if activity in [ActivityType.TRADE, ActivityType.PAPER_TRADE]:
            # Pick a random symbol
            symbol = random.choice(symbols)

            # Analyze
            analysis = await agent.analyze(symbol)
            print(f"  Analysis: {analysis['signal']} {symbol} "
                  f"(confidence: {analysis['confidence']:.1%})")

            # Execute trade if not HOLD
            if analysis["signal"] != "HOLD":
                result = agent.simulate_trade(
                    symbol=symbol,
                    signal=analysis["signal"],
                    confidence=analysis["confidence"],
                )
                print(f"  Trade PnL: ${result['pnl']:+.2f}")

        elif activity == ActivityType.LEARN:
            # Simulate learning
            print("  Agent is learning...")
            agent.improve_skill(0.05)

        # Check status
        status = agent.get_status_dict()
        print(f"  Balance: ${status['balance']:,.2f} | "
              f"Win Rate: {status['win_rate']:.1%} | "
              f"Trades: {status['total_trades']}")

        # Stop early once the agent can no longer afford to continue.
        if not agent.check_survival():
            print("\n  Agent is bankrupt! Stopping simulation.")
            break

    # Final summary
    print("\n" + "=" * 60)
    print("Final Agent Status:")
    print("=" * 60)
    final_status = agent.get_status_dict()
    for key, value in final_status.items():
        if isinstance(value, float):
            # Heuristic: floats > 1 print as amounts, <= 1 as percentages.
            # NOTE(review): a balance that falls below 1.0 would print as a
            # percentage — confirm this is intended for a demo script.
            print(f"  {key}: {value:.2f}" if value > 1 else f"  {key}: {value:.1%}")
        else:
            print(f"  {key}: {value}")
|
if __name__ == "__main__":
|
|
asyncio.run(main())
|