stock/tests/unit/test_optimizer.py
"""Unit tests for strategy optimizer module."""
import time
from datetime import datetime, timedelta
from unittest.mock import Mock
import numpy as np
import pytest
from openclaw.backtest.engine import BacktestResult
from openclaw.optimizer import (
BayesianOptimizer,
GridSearchOptimizer,
OptimizationAnalyzer,
OptimizationResult,
OptimizerConfig,
ParameterSpace,
RandomSearchOptimizer,
StrategyOptimizer,
)
from openclaw.optimizer.base import OptimizationObjective
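

# --- Hedged helper sketch (not used by the fixtures below) -----------------
# Every fixture in this file hand-builds a fully populated BacktestResult.
# A small factory like this could deduplicate that boilerplate; the field
# names and score-derived values mirror the fixtures exactly, but treating
# all fields as keyword arguments is an assumption based only on the usage
# in this file.
def make_backtest_result(score: float) -> BacktestResult:
    """Build a mock BacktestResult whose key metrics are derived from `score`."""
    return BacktestResult(
        start_date=datetime.now(),
        end_date=datetime.now(),
        initial_capital=10000.0,
        final_equity=10000.0 + score * 100,
        total_return=score,
        total_trades=10,
        winning_trades=5,
        losing_trades=5,
        win_rate=50.0,
        avg_win=100.0,
        avg_loss=-50.0,
        profit_factor=1.5,
        sharpe_ratio=score,
        max_drawdown=10.0,
        max_drawdown_duration=5,
        volatility=15.0,
        calmar_ratio=score,
    )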


class TestParameterSpace:
    """Test ParameterSpace class."""

    def test_add_continuous_parameter(self):
        """Test adding continuous parameter."""
        space = ParameterSpace()
        space.add_continuous("learning_rate", 0.001, 0.1, distribution="log_uniform")
        assert "learning_rate" in space
        param = space.get_parameter("learning_rate")
        assert param.param_type.value == "continuous"
        assert param.bounds == (0.001, 0.1)
        assert param.distribution == "log_uniform"

    def test_add_integer_parameter(self):
        """Test adding integer parameter."""
        space = ParameterSpace()
        space.add_integer("window_size", 5, 50)
        assert "window_size" in space
        param = space.get_parameter("window_size")
        assert param.param_type.value == "integer"
        assert param.bounds == (5, 50)

    def test_add_discrete_parameter(self):
        """Test adding discrete parameter."""
        space = ParameterSpace()
        space.add_discrete("threshold", [0.1, 0.2, 0.3, 0.4, 0.5])
        assert "threshold" in space
        param = space.get_parameter("threshold")
        assert param.param_type.value == "discrete"
        assert param.bounds == [0.1, 0.2, 0.3, 0.4, 0.5]

    def test_add_categorical_parameter(self):
        """Test adding categorical parameter."""
        space = ParameterSpace()
        space.add_categorical("strategy_type", ["momentum", "mean_reversion", "trend_following"])
        assert "strategy_type" in space
        param = space.get_parameter("strategy_type")
        assert param.param_type.value == "categorical"
        assert "momentum" in param.bounds

    def test_method_chaining(self):
        """Test that methods can be chained."""
        space = (
            ParameterSpace()
            .add_continuous("param1", 0.0, 1.0)
            .add_integer("param2", 1, 10)
            .add_categorical("param3", ["a", "b"])
        )
        assert len(space) == 3
        assert "param1" in space
        assert "param2" in space
        assert "param3" in space

    def test_invalid_bounds(self):
        """Test that invalid bounds raise errors."""
        space = ParameterSpace()
        with pytest.raises(ValueError, match="Invalid bounds"):
            space.add_continuous("invalid", 1.0, 0.5)

    def test_sample_random(self):
        """Test random sampling from parameter space."""
        space = (
            ParameterSpace()
            .add_continuous("continuous", 0.0, 1.0)
            .add_integer("integer", 1, 10)
            .add_categorical("categorical", ["a", "b", "c"])
        )
        params = space.sample_random()
        assert "continuous" in params
        assert 0.0 <= params["continuous"] <= 1.0
        assert "integer" in params
        assert 1 <= params["integer"] <= 10
        assert "categorical" in params
        assert params["categorical"] in ["a", "b", "c"]

    def test_get_grid_points(self):
        """Test grid point generation."""
        space = (
            ParameterSpace()
            .add_continuous("param1", 0.0, 1.0)
            .add_integer("param2", 1, 3)
        )
        grid = space.get_grid_points(n_points=3)
        # Should have 3 (continuous) * 3 (integer: 1, 2, 3) = 9 points
        assert len(grid) == 9
        # Check first point
        assert "param1" in grid[0]
        assert "param2" in grid[0]

    def test_get_nonexistent_parameter(self):
        """Test getting a parameter that doesn't exist."""
        space = ParameterSpace()
        with pytest.raises(KeyError):
            space.get_parameter("nonexistent")
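

# Hedged sketch: get_grid_points() appears to take the full Cartesian product
# (3 * 3 == 9 in test_get_grid_points above), so the grid should grow
# multiplicatively with each added parameter: four 3-point axes already yield
# 3 ** 4 == 81 combinations. That product behaviour is an assumption, not
# something the API documents here.
def test_grid_points_cartesian_growth():
    space = ParameterSpace()
    for name in ("a", "b", "c", "d"):
        space.add_continuous(name, 0.0, 1.0)
    grid = space.get_grid_points(n_points=3)
    assert len(grid) == 3 ** 4  # 81 points for four 3-point axes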


class TestOptimizerConfig:
    """Test OptimizerConfig class."""

    def test_default_config(self):
        """Test default configuration values."""
        config = OptimizerConfig()
        assert config.objective == OptimizationObjective.MAXIMIZE_SHARPE
        assert config.max_iterations == 100
        assert config.n_jobs == -1
        assert config.early_stopping is True
        assert config.early_stopping_patience == 10

    def test_custom_config(self):
        """Test custom configuration."""
        config = OptimizerConfig(
            objective=OptimizationObjective.MAXIMIZE_RETURN,
            max_iterations=50,
            n_jobs=4,
            early_stopping=False,
            random_state=42,
        )
        assert config.objective == OptimizationObjective.MAXIMIZE_RETURN
        assert config.max_iterations == 50
        assert config.n_jobs == 4
        assert config.early_stopping is False
        assert config.random_state == 42

    def test_invalid_max_iterations(self):
        """Test that invalid max_iterations raises error."""
        with pytest.raises(ValueError, match="max_iterations"):
            OptimizerConfig(max_iterations=0)

    def test_invalid_validation_split(self):
        """Test that invalid validation_split raises error."""
        with pytest.raises(ValueError, match="validation_split"):
            OptimizerConfig(validation_split=1.5)


class TestGridSearchOptimizer:
    """Test GridSearchOptimizer class."""

    @pytest.fixture
    def simple_space(self):
        """Create a simple parameter space."""
        return (
            ParameterSpace()
            .add_discrete("threshold", [0.1, 0.2, 0.3])
            .add_integer("window", 5, 6)
        )

    @pytest.fixture
    def mock_backtest_fn(self):
        """Create a mock backtest function."""
        def backtest_fn(params):
            # Simple scoring: higher threshold + window = better
            score = params.get("threshold", 0) * 100 + params.get("window", 0)
            return BacktestResult(
                start_date=datetime.now(),
                end_date=datetime.now(),
                initial_capital=10000.0,
                final_equity=10000.0 + score * 100,
                total_return=score,
                total_trades=10,
                winning_trades=5,
                losing_trades=5,
                win_rate=50.0,
                avg_win=100.0,
                avg_loss=-50.0,
                profit_factor=1.5,
                sharpe_ratio=score / 10,
                max_drawdown=10.0,
                max_drawdown_duration=5,
                volatility=15.0,
                calmar_ratio=score / 10,
            )
        return backtest_fn

    def test_initialization(self, simple_space):
        """Test optimizer initialization."""
        optimizer = GridSearchOptimizer(simple_space, n_points=3)
        assert optimizer.parameter_space == simple_space
        assert optimizer.n_points == 3

    def test_get_grid_size(self, simple_space):
        """Test getting grid size."""
        optimizer = GridSearchOptimizer(simple_space, n_points=3)
        # 3 thresholds * 2 windows (5, 6) = 6
        assert optimizer.get_grid_size() == 6

    def test_optimize(self, simple_space, mock_backtest_fn):
        """Test optimization."""
        config = OptimizerConfig(max_iterations=100, n_jobs=1)
        optimizer = GridSearchOptimizer(simple_space, config=config, n_points=3)
        result = optimizer.optimize(mock_backtest_fn)
        assert isinstance(result, OptimizationResult)
        assert result.best_params is not None
        assert result.best_score > float("-inf")
        assert len(result.all_results) == 6  # All combinations
        assert result.converged is True

    def test_optimize_with_max_iterations(self, simple_space, mock_backtest_fn):
        """Test optimization respects max_iterations."""
        config = OptimizerConfig(max_iterations=4, n_jobs=1)
        optimizer = GridSearchOptimizer(simple_space, config=config, n_points=5)
        result = optimizer.optimize(mock_backtest_fn)
        # Should be limited by max_iterations
        assert result.n_iterations <= 4

    def test_optimize_with_callback(self, simple_space, mock_backtest_fn):
        """Test optimization with callback."""
        callback_calls = []

        def callback(iteration, params, score):
            callback_calls.append((iteration, params, score))

        config = OptimizerConfig(max_iterations=100, n_jobs=1)
        optimizer = GridSearchOptimizer(simple_space, config=config)
        optimizer.optimize(mock_backtest_fn, callback=callback)
        assert len(callback_calls) == 6
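

# Hedged sketch: with the mock scoring threshold * 100 + window (monotone in
# both parameters), the grid optimum is analytically threshold=0.3, window=6,
# so an exhaustive grid search should recover it exactly. This assumes the
# optimizer returns the raw grid point as best_params, which test_optimize
# above does not pin down. Uses the make_backtest_result sketch from the top
# of this file.
def test_grid_search_recovers_analytic_optimum():
    space = (
        ParameterSpace()
        .add_discrete("threshold", [0.1, 0.2, 0.3])
        .add_integer("window", 5, 6)
    )

    def backtest_fn(params):
        return make_backtest_result(params["threshold"] * 100 + params["window"])

    config = OptimizerConfig(max_iterations=100, n_jobs=1)
    optimizer = GridSearchOptimizer(space, config=config, n_points=3)
    result = optimizer.optimize(backtest_fn)
    assert result.best_params == {"threshold": 0.3, "window": 6}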


class TestRandomSearchOptimizer:
    """Test RandomSearchOptimizer class."""

    @pytest.fixture
    def continuous_space(self):
        """Create a parameter space with continuous parameters."""
        return (
            ParameterSpace()
            .add_continuous("alpha", 0.0, 1.0)
            .add_continuous("beta", 0.0, 1.0)
        )

    @pytest.fixture
    def mock_backtest_fn(self):
        """Create a mock backtest function."""
        def backtest_fn(params):
            score = params.get("alpha", 0) + params.get("beta", 0)
            return BacktestResult(
                start_date=datetime.now(),
                end_date=datetime.now(),
                initial_capital=10000.0,
                final_equity=10000.0,
                total_return=score * 100,
                total_trades=10,
                winning_trades=5,
                losing_trades=5,
                win_rate=50.0,
                avg_win=100.0,
                avg_loss=-50.0,
                profit_factor=1.5,
                sharpe_ratio=score,
                max_drawdown=10.0,
                max_drawdown_duration=5,
                volatility=15.0,
                calmar_ratio=score,
            )
        return backtest_fn

    def test_initialization(self, continuous_space):
        """Test optimizer initialization."""
        optimizer = RandomSearchOptimizer(continuous_space, n_samples=50)
        assert optimizer.parameter_space == continuous_space
        assert optimizer.n_samples == 50

    def test_optimize(self, continuous_space, mock_backtest_fn):
        """Test optimization."""
        config = OptimizerConfig(
            max_iterations=100, n_jobs=1, early_stopping=False, random_state=42
        )
        optimizer = RandomSearchOptimizer(continuous_space, config=config, n_samples=20)
        result = optimizer.optimize(mock_backtest_fn)
        assert isinstance(result, OptimizationResult)
        assert result.best_params is not None
        assert result.best_score > float("-inf")
        assert result.n_iterations == 20

    def test_optimize_with_early_stopping(self, continuous_space, mock_backtest_fn):
        """Test optimization with early stopping."""
        config = OptimizerConfig(
            max_iterations=100,
            n_jobs=1,
            early_stopping=True,
            early_stopping_patience=2,
            early_stopping_min_delta=0.1,
            random_state=42,
        )
        optimizer = RandomSearchOptimizer(continuous_space, config=config, n_samples=50)
        result = optimizer.optimize(mock_backtest_fn)
        # Should stop early due to no improvement
        assert result.n_iterations < 50

    def test_optimize_with_warm_start(self, continuous_space, mock_backtest_fn):
        """Test optimization with warm start."""
        initial_params = [
            ({"alpha": 0.5, "beta": 0.5}, 1.0),
            ({"alpha": 0.6, "beta": 0.4}, 1.0),
        ]
        config = OptimizerConfig(max_iterations=100, n_jobs=1, random_state=42)
        optimizer = RandomSearchOptimizer(continuous_space, config=config, n_samples=10)
        result = optimizer.optimize_with_warm_start(mock_backtest_fn, initial_params)
        assert len(result.all_results) >= 2  # At least the initial params
        assert result.best_score >= 1.0
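

# Hedged sketch: two runs with the same random_state should sample the same
# candidates and agree on best_params. This assumes random_state fully seeds
# the sampler; the tests above pass random_state=42 but never assert
# determinism, so treat this as a property worth checking, not a guarantee.
def test_random_search_reproducible_with_seed():
    space = ParameterSpace().add_continuous("alpha", 0.0, 1.0)

    def backtest_fn(params):
        return make_backtest_result(params["alpha"])  # sketch helper from above

    def run_once():
        config = OptimizerConfig(
            max_iterations=100, n_jobs=1, early_stopping=False, random_state=42
        )
        optimizer = RandomSearchOptimizer(space, config=config, n_samples=10)
        return optimizer.optimize(backtest_fn)

    assert run_once().best_params == run_once().best_params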


class TestBayesianOptimizer:
    """Test BayesianOptimizer class."""

    @pytest.fixture
    def simple_space(self):
        """Create a simple parameter space."""
        return (
            ParameterSpace()
            .add_continuous("x", 0.0, 10.0)
            .add_continuous("y", 0.0, 10.0)
        )

    @pytest.fixture
    def mock_backtest_fn(self):
        """Create a mock backtest function with known optimum."""
        def backtest_fn(params):
            # Optimum at x=7, y=3
            x = params.get("x", 0)
            y = params.get("y", 0)
            score = -((x - 7) ** 2 + (y - 3) ** 2) / 100 + 5
            return BacktestResult(
                start_date=datetime.now(),
                end_date=datetime.now(),
                initial_capital=10000.0,
                final_equity=10000.0,
                total_return=score * 10,
                total_trades=10,
                winning_trades=5,
                losing_trades=5,
                win_rate=50.0,
                avg_win=100.0,
                avg_loss=-50.0,
                profit_factor=1.5,
                sharpe_ratio=score,
                max_drawdown=10.0,
                max_drawdown_duration=5,
                volatility=15.0,
                calmar_ratio=score,
            )
        return backtest_fn

    def test_initialization(self, simple_space):
        """Test optimizer initialization."""
        optimizer = BayesianOptimizer(
            simple_space,
            n_initial_points=5,
            acquisition="ei",
        )
        assert optimizer.parameter_space == simple_space
        assert optimizer.n_initial_points == 5
        assert optimizer.acquisition == "ei"

    def test_invalid_acquisition(self, simple_space):
        """Test that invalid acquisition function raises error."""
        with pytest.raises(ValueError, match="Unknown acquisition"):
            BayesianOptimizer(simple_space, acquisition="invalid")

    def test_optimize(self, simple_space, mock_backtest_fn):
        """Test optimization."""
        config = OptimizerConfig(max_iterations=20, n_jobs=1, random_state=42)
        optimizer = BayesianOptimizer(
            simple_space,
            config=config,
            n_initial_points=5,
            acquisition="ei",
        )
        result = optimizer.optimize(mock_backtest_fn)
        assert isinstance(result, OptimizationResult)
        assert result.best_params is not None
        assert result.best_score > float("-inf")
        assert result.n_iterations >= 5  # At least the initial points
        # Should find something close to the optimum
        best = result.best_params
        assert 5 <= best["x"] <= 9  # Around 7
        assert 1 <= best["y"] <= 5  # Around 3

    def test_parameter_importance(self, simple_space, mock_backtest_fn):
        """Test parameter importance calculation."""
        config = OptimizerConfig(max_iterations=15, n_jobs=1, random_state=42)
        optimizer = BayesianOptimizer(
            simple_space,
            config=config,
            n_initial_points=5,
        )
        result = optimizer.optimize(mock_backtest_fn)
        assert "x" in result.parameter_importance
        assert "y" in result.parameter_importance
        # Importance scores should sum to 1
        assert abs(sum(result.parameter_importance.values()) - 1.0) < 0.01
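

# Hedged sketch: if the objective depends on x alone, x should dominate the
# parameter_importance breakdown. The tests above only verify that the scores
# exist and sum to 1; that importance tracks a parameter's actual influence
# on the objective is an assumption about the implementation.
def test_importance_concentrates_on_influential_parameter():
    space = (
        ParameterSpace()
        .add_continuous("x", 0.0, 10.0)
        .add_continuous("y", 0.0, 10.0)
    )

    def backtest_fn(params):
        # y is deliberately ignored by the objective
        return make_backtest_result(-((params["x"] - 7) ** 2))

    config = OptimizerConfig(max_iterations=20, n_jobs=1, random_state=42)
    optimizer = BayesianOptimizer(space, config=config, n_initial_points=5)
    result = optimizer.optimize(backtest_fn)
    assert result.parameter_importance["x"] > result.parameter_importance["y"]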


class TestOptimizationAnalyzer:
    """Test OptimizationAnalyzer class."""

    @pytest.fixture
    def analyzer(self):
        """Create an analyzer instance."""
        return OptimizationAnalyzer()

    @pytest.fixture
    def sample_result(self):
        """Create a sample optimization result."""
        all_results = [
            ({"param1": 0.1, "param2": 10}, 1.0, None),
            ({"param1": 0.2, "param2": 20}, 2.0, None),
            ({"param1": 0.3, "param2": 30}, 3.0, None),
            ({"param1": 0.4, "param2": 40}, 4.0, None),
            ({"param1": 0.5, "param2": 50}, 5.0, None),
        ]
        return OptimizationResult(
            best_params={"param1": 0.5, "param2": 50},
            best_score=5.0,
            best_result=None,
            all_results=all_results,
            optimization_time=10.0,
            n_iterations=5,
            converged=True,
        )

    def test_analyze_parameter_sensitivity(self, analyzer, sample_result):
        """Test parameter sensitivity analysis."""
        sensitivity = analyzer.analyze_parameter_sensitivity(
            sample_result, "param1"
        )
        assert sensitivity.parameter_name == "param1"
        assert len(sensitivity.values) > 0
        assert len(sensitivity.scores) > 0
        assert sensitivity.sensitivity_score >= 0.0

    def test_detect_overfitting(self, analyzer):
        """Test overfitting detection."""
        train_result = OptimizationResult(
            best_params={},
            best_score=10.0,
            best_result=None,
            all_results=[],
            converged=True,
        )
        validation_result = OptimizationResult(
            best_params={},
            best_score=5.0,
            best_result=None,
            all_results=[],
            converged=True,
        )
        overfitting = analyzer.detect_overfitting(
            train_result, validation_result, threshold=0.3
        )
        assert overfitting.is_overfitted is True
        assert overfitting.train_score == 10.0
        assert overfitting.validation_score == 5.0
        assert overfitting.severity in ["low", "medium", "high"]

    def test_no_overfitting(self, analyzer):
        """Test when there's no overfitting."""
        train_result = OptimizationResult(
            best_params={},
            best_score=10.0,
            best_result=None,
            all_results=[],
            converged=True,
        )
        validation_result = OptimizationResult(
            best_params={},
            best_score=10.0,  # Same as train = no overfitting
            best_result=None,
            all_results=[],
            converged=True,
        )
        overfitting = analyzer.detect_overfitting(
            train_result, validation_result, threshold=0.3
        )
        assert overfitting.is_overfitted is False
        assert overfitting.severity == "none"

    def test_get_optimization_curve(self, analyzer, sample_result):
        """Test getting optimization curve."""
        iterations, best_scores = analyzer.get_optimization_curve(sample_result)
        assert len(iterations) == 5
        assert len(best_scores) == 5
        assert best_scores == [1.0, 2.0, 3.0, 4.0, 5.0]  # Cumulative best

    def test_get_convergence_rate(self, analyzer, sample_result):
        """Test convergence rate calculation."""
        rate = analyzer.get_convergence_rate(sample_result, window_size=2)
        assert isinstance(rate, float)

    def test_get_top_configurations(self, analyzer, sample_result):
        """Test getting top configurations."""
        top = analyzer.get_top_configurations(sample_result, n_top=3)
        assert len(top) == 3
        # Should be sorted by score (descending)
        assert top[0][1] >= top[1][1] >= top[2][1]

    def test_calculate_robustness_score(self, analyzer, sample_result):
        """Test robustness score calculation."""
        score = analyzer.calculate_robustness_score(sample_result, n_bootstrap=50)
        assert 0.0 <= score <= 1.0

    def test_generate_report(self, analyzer, sample_result):
        """Test report generation."""
        report = analyzer.generate_report(sample_result)
        assert "best_params" in report
        assert "best_score" in report
        assert "n_iterations" in report
        assert "score_statistics" in report
        assert "parameter_sensitivity" in report
        assert "top_configurations" in report
        assert report["best_score"] == 5.0
        assert report["converged"] is True

    def test_empty_results(self, analyzer):
        """Test handling of empty results."""
        empty_result = OptimizationResult(
            best_params={},
            best_score=0.0,
            best_result=None,
            all_results=[],
            converged=False,
        )
        report = analyzer.generate_report(empty_result)
        assert "error" in report
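

# Hedged sketch: test_get_optimization_curve above labels the curve
# "cumulative best", so on a noisy score sequence it should be monotone
# non-decreasing. Built only on the OptimizationResult shape used throughout
# this file.
def test_optimization_curve_is_monotone_on_noisy_scores():
    noisy = [({"p": float(i)}, s, None) for i, s in enumerate([1.0, 3.0, 2.0, 5.0, 4.0])]
    result = OptimizationResult(
        best_params={"p": 3.0},
        best_score=5.0,
        best_result=None,
        all_results=noisy,
        converged=True,
    )
    _, best_scores = OptimizationAnalyzer().get_optimization_curve(result)
    assert best_scores == [1.0, 3.0, 3.0, 5.0, 5.0]
    assert all(a <= b for a, b in zip(best_scores, best_scores[1:]))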


class TestIntegration:
    """Integration tests for the optimizer module."""

    def test_full_optimization_workflow(self):
        """Test a complete optimization workflow."""
        # Create parameter space
        space = (
            ParameterSpace()
            .add_continuous("multiplier", 1.0, 3.0)
            .add_integer("period", 5, 15)
        )

        # Create mock backtest function
        call_count = 0

        def backtest_fn(params):
            nonlocal call_count
            call_count += 1
            multiplier = params.get("multiplier", 1.0)
            period = params.get("period", 10)
            score = multiplier * period / 10
            return BacktestResult(
                start_date=datetime.now(),
                end_date=datetime.now(),
                initial_capital=10000.0,
                final_equity=10000.0 + score * 100,
                total_return=score * 10,
                total_trades=10,
                winning_trades=5,
                losing_trades=5,
                win_rate=50.0,
                avg_win=100.0,
                avg_loss=-50.0,
                profit_factor=1.5,
                sharpe_ratio=score,
                max_drawdown=10.0,
                max_drawdown_duration=5,
                volatility=15.0,
                calmar_ratio=score,
            )

        # Test with each optimizer
        config = OptimizerConfig(max_iterations=10, n_jobs=1, random_state=42)

        # Grid Search
        call_count = 0
        grid_optimizer = GridSearchOptimizer(space, config=config, n_points=2)
        grid_result = grid_optimizer.optimize(backtest_fn)
        assert grid_result.best_score > 0
        assert len(grid_result.all_results) > 0

        # Random Search
        call_count = 0
        random_optimizer = RandomSearchOptimizer(space, config=config, n_samples=10)
        random_result = random_optimizer.optimize(backtest_fn)
        assert random_result.best_score > 0

        # Bayesian Optimization
        call_count = 0
        bayesian_optimizer = BayesianOptimizer(
            space,
            config=config,
            n_initial_points=5,
            acquisition="ei",
        )
        bayesian_result = bayesian_optimizer.optimize(backtest_fn)
        assert bayesian_result.best_score > 0

        # Analyze results
        analyzer = OptimizationAnalyzer()
        grid_report = analyzer.generate_report(grid_result)
        assert grid_report["best_score"] == grid_result.best_score

    def test_different_objectives(self):
        """Test optimization with different objectives."""
        space = ParameterSpace().add_continuous("param", 0.0, 1.0)

        def backtest_fn(params):
            value = params.get("param", 0.5)
            return BacktestResult(
                start_date=datetime.now(),
                end_date=datetime.now(),
                initial_capital=10000.0,
                final_equity=10000.0,
                total_return=value * 100,
                total_trades=10,
                winning_trades=int(value * 10),
                losing_trades=int((1 - value) * 10),
                win_rate=value * 100,
                avg_win=100.0,
                avg_loss=-50.0,
                profit_factor=1.5,
                sharpe_ratio=value * 2,
                max_drawdown=(1 - value) * 20,
                max_drawdown_duration=5,
                volatility=15.0,
                calmar_ratio=value * 5,
            )

        # Test different objectives
        objectives = [
            OptimizationObjective.MAXIMIZE_RETURN,
            OptimizationObjective.MAXIMIZE_SHARPE,
            OptimizationObjective.MAXIMIZE_CALMAR,
            OptimizationObjective.MINIMIZE_DRAWDOWN,
            OptimizationObjective.MAXIMIZE_WIN_RATE,
        ]
        for objective in objectives:
            config = OptimizerConfig(
                objective=objective,
                max_iterations=5,
                n_jobs=1,
                random_state=42,
            )
            optimizer = RandomSearchOptimizer(space, config=config, n_samples=5)
            result = optimizer.optimize(backtest_fn)
            assert result.best_score != float("-inf")

    def test_parameter_correlations(self):
        """Test parameter correlation analysis."""
        all_results = [
            ({"x": 1.0, "y": 1.0}, 2.0, None),
            ({"x": 2.0, "y": 2.0}, 4.0, None),
            ({"x": 3.0, "y": 3.0}, 6.0, None),
            ({"x": 4.0, "y": 4.0}, 8.0, None),
            ({"x": 5.0, "y": 5.0}, 10.0, None),
        ]
        result = OptimizationResult(
            best_params={"x": 5.0, "y": 5.0},
            best_score=10.0,
            best_result=None,
            all_results=all_results,
            converged=True,
        )
        analyzer = OptimizationAnalyzer()
        correlations = analyzer.analyze_parameter_correlations(result)
        # x and y are perfectly correlated in the data
        assert ("x", "y") in correlations or ("y", "x") in correlations
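

# Hedged sketch: with x and y moving in opposite directions, the reported
# correlation should be strongly negative. test_parameter_correlations above
# only checks that the pair key exists; that the dict values are Pearson-style
# coefficients in [-1, 1] is an assumption about the return shape.
def test_anticorrelated_parameters_report_negative_correlation():
    all_results = [({"x": float(i), "y": float(5 - i)}, float(i), None) for i in range(1, 6)]
    result = OptimizationResult(
        best_params={"x": 5.0, "y": 0.0},
        best_score=5.0,
        best_result=None,
        all_results=all_results,
        converged=True,
    )
    correlations = OptimizationAnalyzer().analyze_parameter_correlations(result)
    key = ("x", "y") if ("x", "y") in correlations else ("y", "x")
    assert correlations[key] < -0.9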