Edwin Salguero
committed on
Commit
·
859af74
0
Parent(s):
Initial commit: Enhanced Algorithmic Trading System with Synthetic Data Generation, Comprehensive Logging, and Extensive Testing
Browse files- Added synthetic data generator with realistic market data simulation
- Implemented comprehensive logging system with rotating log files
- Created extensive test suite with unit and integration tests
- Enhanced strategy agent with technical indicators (SMA, RSI, Bollinger Bands, MACD)
- Improved execution agent with realistic order simulation
- Added backtesting and live trading capabilities
- Updated configuration system with synthetic data and logging settings
- Created comprehensive documentation and demo script
- .gitignore +101 -0
- README.md +298 -0
- agentic_ai_system/agent_base.py +23 -0
- agentic_ai_system/data_ingestion.py +149 -0
- agentic_ai_system/execution_agent.py +206 -0
- agentic_ai_system/logger_config.py +142 -0
- agentic_ai_system/main.py +106 -0
- agentic_ai_system/orchestrator.py +244 -0
- agentic_ai_system/strategy_agent.py +244 -0
- agentic_ai_system/synthetic_data_generator.py +228 -0
- config.yaml +35 -0
- demo.py +148 -0
- pytest.ini +19 -0
- requirements.txt +9 -0
- tests/__init__.py +1 -0
- tests/test_data_ingestion.py +311 -0
- tests/test_execution_agent.py +300 -0
- tests/test_integration.py +313 -0
- tests/test_strategy_agent.py +252 -0
- tests/test_synthetic_data_generator.py +210 -0
.gitignore
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
*.so
|
6 |
+
.Python
|
7 |
+
build/
|
8 |
+
develop-eggs/
|
9 |
+
dist/
|
10 |
+
downloads/
|
11 |
+
eggs/
|
12 |
+
.eggs/
|
13 |
+
lib/
|
14 |
+
lib64/
|
15 |
+
parts/
|
16 |
+
sdist/
|
17 |
+
var/
|
18 |
+
wheels/
|
19 |
+
*.egg-info/
|
20 |
+
.installed.cfg
|
21 |
+
*.egg
|
22 |
+
MANIFEST
|
23 |
+
|
24 |
+
# PyInstaller
|
25 |
+
*.manifest
|
26 |
+
*.spec
|
27 |
+
|
28 |
+
# Installer logs
|
29 |
+
pip-log.txt
|
30 |
+
pip-delete-this-directory.txt
|
31 |
+
|
32 |
+
# Unit test / coverage reports
|
33 |
+
htmlcov/
|
34 |
+
.tox/
|
35 |
+
.coverage
|
36 |
+
.coverage.*
|
37 |
+
.cache
|
38 |
+
nosetests.xml
|
39 |
+
coverage.xml
|
40 |
+
*.cover
|
41 |
+
.hypothesis/
|
42 |
+
.pytest_cache/
|
43 |
+
|
44 |
+
# Jupyter Notebook
|
45 |
+
.ipynb_checkpoints
|
46 |
+
|
47 |
+
# pyenv
|
48 |
+
.python-version
|
49 |
+
|
50 |
+
# celery beat schedule file
|
51 |
+
celerybeat-schedule
|
52 |
+
|
53 |
+
# SageMath parsed files
|
54 |
+
*.sage.py
|
55 |
+
|
56 |
+
# Environments
|
57 |
+
.env
|
58 |
+
.venv
|
59 |
+
env/
|
60 |
+
venv/
|
61 |
+
ENV/
|
62 |
+
env.bak/
|
63 |
+
venv.bak/
|
64 |
+
|
65 |
+
# Spyder project settings
|
66 |
+
.spyderproject
|
67 |
+
.spyproject
|
68 |
+
|
69 |
+
# Rope project settings
|
70 |
+
.ropeproject
|
71 |
+
|
72 |
+
# mkdocs documentation
|
73 |
+
/site
|
74 |
+
|
75 |
+
# mypy
|
76 |
+
.mypy_cache/
|
77 |
+
.dmypy.json
|
78 |
+
dmypy.json
|
79 |
+
|
80 |
+
# Project specific
|
81 |
+
logs/
|
82 |
+
data/
|
83 |
+
*.log
|
84 |
+
*.csv
|
85 |
+
*.json
|
86 |
+
|
87 |
+
# IDE
|
88 |
+
.vscode/
|
89 |
+
.idea/
|
90 |
+
*.swp
|
91 |
+
*.swo
|
92 |
+
*~
|
93 |
+
|
94 |
+
# OS
|
95 |
+
.DS_Store
|
96 |
+
.DS_Store?
|
97 |
+
._*
|
98 |
+
.Spotlight-V100
|
99 |
+
.Trashes
|
100 |
+
ehthumbs.db
|
101 |
+
Thumbs.db
|
README.md
ADDED
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Algorithmic Trading System
|
2 |
+
|
3 |
+
A comprehensive algorithmic trading system with synthetic data generation, comprehensive logging, and extensive testing capabilities.
|
4 |
+
|
5 |
+
## Features
|
6 |
+
|
7 |
+
### Core Trading System
|
8 |
+
- **Agent-based Architecture**: Modular design with separate strategy and execution agents
|
9 |
+
- **Technical Analysis**: Built-in technical indicators (SMA, RSI, Bollinger Bands, MACD)
|
10 |
+
- **Risk Management**: Position sizing and drawdown limits
|
11 |
+
- **Order Execution**: Simulated broker integration with realistic execution delays
|
12 |
+
|
13 |
+
### Synthetic Data Generation
|
14 |
+
- **Realistic Market Data**: Generate OHLCV data using geometric Brownian motion
|
15 |
+
- **Multiple Frequencies**: Support for 1min, 5min, 1H, and 1D data
|
16 |
+
- **Market Scenarios**: Normal, volatile, trending, and crash market conditions
|
17 |
+
- **Tick Data**: High-frequency tick data generation for testing
|
18 |
+
- **Configurable Parameters**: Volatility, trend, noise levels, and base prices
|
19 |
+
|
20 |
+
### Comprehensive Logging
|
21 |
+
- **Multi-level Logging**: Console and file-based logging
|
22 |
+
- **Rotating Log Files**: Automatic log rotation with size limits
|
23 |
+
- **Specialized Loggers**: Separate loggers for trading, performance, and errors
|
24 |
+
- **Structured Logging**: Detailed log messages with timestamps and context
|
25 |
+
|
26 |
+
### Testing Framework
|
27 |
+
- **Unit Tests**: Comprehensive tests for all components
|
28 |
+
- **Integration Tests**: End-to-end workflow testing
|
29 |
+
- **Test Coverage**: Code coverage reporting with HTML and XML outputs
|
30 |
+
- **Mock Testing**: Isolated testing with mocked dependencies
|
31 |
+
|
32 |
+
## Installation
|
33 |
+
|
34 |
+
1. Clone the repository:
|
35 |
+
```bash
|
36 |
+
git clone <repository-url>
|
37 |
+
cd algorithmic_trading
|
38 |
+
```
|
39 |
+
|
40 |
+
2. Install dependencies:
|
41 |
+
```bash
|
42 |
+
pip install -r requirements.txt
|
43 |
+
```
|
44 |
+
|
45 |
+
## Configuration
|
46 |
+
|
47 |
+
The system is configured via `config.yaml`:
|
48 |
+
|
49 |
+
```yaml
|
50 |
+
# Data source configuration
|
51 |
+
data_source:
|
52 |
+
type: 'synthetic' # or 'csv'
|
53 |
+
path: 'data/market_data.csv'
|
54 |
+
|
55 |
+
# Trading parameters
|
56 |
+
trading:
|
57 |
+
symbol: 'AAPL'
|
58 |
+
timeframe: '1min'
|
59 |
+
capital: 100000
|
60 |
+
|
61 |
+
# Risk management
|
62 |
+
risk:
|
63 |
+
max_position: 100
|
64 |
+
max_drawdown: 0.05
|
65 |
+
|
66 |
+
# Order execution
|
67 |
+
execution:
|
68 |
+
broker_api: 'paper'
|
69 |
+
order_size: 10
|
70 |
+
delay_ms: 100
|
71 |
+
success_rate: 0.95
|
72 |
+
|
73 |
+
# Synthetic data generation
|
74 |
+
synthetic_data:
|
75 |
+
base_price: 150.0
|
76 |
+
volatility: 0.02
|
77 |
+
trend: 0.001
|
78 |
+
noise_level: 0.005
|
79 |
+
generate_data: true
|
80 |
+
data_path: 'data/synthetic_market_data.csv'
|
81 |
+
|
82 |
+
# Logging configuration
|
83 |
+
logging:
|
84 |
+
log_level: 'INFO'
|
85 |
+
log_dir: 'logs'
|
86 |
+
enable_console: true
|
87 |
+
enable_file: true
|
88 |
+
max_file_size_mb: 10
|
89 |
+
backup_count: 5
|
90 |
+
```
|
91 |
+
|
92 |
+
## Usage
|
93 |
+
|
94 |
+
### Standard Trading Mode
|
95 |
+
```bash
|
96 |
+
python -m agentic_ai_system.main
|
97 |
+
```
|
98 |
+
|
99 |
+
### Backtest Mode
|
100 |
+
```bash
|
101 |
+
python -m agentic_ai_system.main --mode backtest --start-date 2024-01-01 --end-date 2024-12-31
|
102 |
+
```
|
103 |
+
|
104 |
+
### Live Trading Mode
|
105 |
+
```bash
|
106 |
+
python -m agentic_ai_system.main --mode live --duration 60
|
107 |
+
```
|
108 |
+
|
109 |
+
### Custom Configuration
|
110 |
+
```bash
|
111 |
+
python -m agentic_ai_system.main --config custom_config.yaml
|
112 |
+
```
|
113 |
+
|
114 |
+
## Running Tests
|
115 |
+
|
116 |
+
### All Tests
|
117 |
+
```bash
|
118 |
+
pytest
|
119 |
+
```
|
120 |
+
|
121 |
+
### Unit Tests Only
|
122 |
+
```bash
|
123 |
+
pytest -m unit
|
124 |
+
```
|
125 |
+
|
126 |
+
### Integration Tests Only
|
127 |
+
```bash
|
128 |
+
pytest -m integration
|
129 |
+
```
|
130 |
+
|
131 |
+
### With Coverage Report
|
132 |
+
```bash
|
133 |
+
pytest --cov=agentic_ai_system --cov-report=html
|
134 |
+
```
|
135 |
+
|
136 |
+
### Specific Test File
|
137 |
+
```bash
|
138 |
+
pytest tests/test_synthetic_data_generator.py
|
139 |
+
```
|
140 |
+
|
141 |
+
## System Architecture
|
142 |
+
|
143 |
+
### Components
|
144 |
+
|
145 |
+
1. **SyntheticDataGenerator**: Generates realistic market data for testing
|
146 |
+
2. **DataIngestion**: Loads and validates market data from various sources
|
147 |
+
3. **StrategyAgent**: Analyzes market data and generates trading signals
|
148 |
+
4. **ExecutionAgent**: Executes trading orders with broker simulation
|
149 |
+
5. **Orchestrator**: Coordinates the entire trading workflow
|
150 |
+
6. **LoggerConfig**: Manages comprehensive logging throughout the system
|
151 |
+
|
152 |
+
### Data Flow
|
153 |
+
|
154 |
+
```
|
155 |
+
Synthetic Data Generator → Data Ingestion → Strategy Agent → Execution Agent
|
156 |
+
↓
|
157 |
+
Logging System
|
158 |
+
```
|
159 |
+
|
160 |
+
## Synthetic Data Generation
|
161 |
+
|
162 |
+
### Features
|
163 |
+
- **Geometric Brownian Motion**: Realistic price movement simulation
|
164 |
+
- **OHLCV Data**: Complete market data with open, high, low, close, and volume
|
165 |
+
- **Market Scenarios**: Different market conditions for testing
|
166 |
+
- **Configurable Parameters**: Adjustable volatility, trend, and noise levels
|
167 |
+
|
168 |
+
### Usage Examples
|
169 |
+
|
170 |
+
```python
|
171 |
+
from agentic_ai_system.synthetic_data_generator import SyntheticDataGenerator
|
172 |
+
|
173 |
+
# Initialize generator
|
174 |
+
generator = SyntheticDataGenerator(config)
|
175 |
+
|
176 |
+
# Generate OHLCV data
|
177 |
+
data = generator.generate_ohlcv_data(
|
178 |
+
symbol='AAPL',
|
179 |
+
start_date='2024-01-01',
|
180 |
+
end_date='2024-12-31',
|
181 |
+
frequency='1min'
|
182 |
+
)
|
183 |
+
|
184 |
+
# Generate tick data
|
185 |
+
tick_data = generator.generate_tick_data(
|
186 |
+
symbol='AAPL',
|
187 |
+
duration_minutes=60,
|
188 |
+
tick_interval_ms=1000
|
189 |
+
)
|
190 |
+
|
191 |
+
# Generate market scenarios
|
192 |
+
crash_data = generator.generate_market_scenarios('crash')
|
193 |
+
volatile_data = generator.generate_market_scenarios('volatile')
|
194 |
+
```
|
195 |
+
|
196 |
+
## Logging System
|
197 |
+
|
198 |
+
### Log Files
|
199 |
+
- `logs/trading_system.log`: General system logs
|
200 |
+
- `logs/trading.log`: Trading-specific logs
|
201 |
+
- `logs/performance.log`: Performance metrics
|
202 |
+
- `logs/errors.log`: Error logs
|
203 |
+
|
204 |
+
### Log Levels
|
205 |
+
- **DEBUG**: Detailed debugging information
|
206 |
+
- **INFO**: General information about system operation
|
207 |
+
- **WARNING**: Warning messages for potential issues
|
208 |
+
- **ERROR**: Error messages for failed operations
|
209 |
+
- **CRITICAL**: Critical system failures
|
210 |
+
|
211 |
+
### Usage Examples
|
212 |
+
|
213 |
+
```python
|
214 |
+
import logging
|
215 |
+
from agentic_ai_system.logger_config import setup_logging, get_logger
|
216 |
+
|
217 |
+
# Setup logging
|
218 |
+
setup_logging(config)
|
219 |
+
|
220 |
+
# Get logger for specific module
|
221 |
+
logger = get_logger(__name__)
|
222 |
+
|
223 |
+
# Log messages
|
224 |
+
logger.info("Trading signal generated")
|
225 |
+
logger.warning("High volatility detected")
|
226 |
+
logger.error("Order execution failed", exc_info=True)
|
227 |
+
```
|
228 |
+
|
229 |
+
## Testing
|
230 |
+
|
231 |
+
### Test Structure
|
232 |
+
```
|
233 |
+
tests/
|
234 |
+
├── __init__.py
|
235 |
+
├── test_synthetic_data_generator.py
|
236 |
+
├── test_strategy_agent.py
|
237 |
+
├── test_execution_agent.py
|
238 |
+
├── test_data_ingestion.py
|
239 |
+
└── test_integration.py
|
240 |
+
```
|
241 |
+
|
242 |
+
### Test Categories
|
243 |
+
- **Unit Tests**: Test individual components in isolation
|
244 |
+
- **Integration Tests**: Test complete workflows
|
245 |
+
- **Performance Tests**: Test system performance and scalability
|
246 |
+
- **Error Handling Tests**: Test error conditions and edge cases
|
247 |
+
|
248 |
+
### Running Specific Tests
|
249 |
+
|
250 |
+
```bash
|
251 |
+
# Run tests with specific markers
|
252 |
+
pytest -m unit
|
253 |
+
pytest -m integration
|
254 |
+
pytest -m slow
|
255 |
+
|
256 |
+
# Run tests with coverage
|
257 |
+
pytest --cov=agentic_ai_system --cov-report=html
|
258 |
+
|
259 |
+
# Run tests in parallel
|
260 |
+
pytest -n auto
|
261 |
+
|
262 |
+
# Run tests with verbose output
|
263 |
+
pytest -v
|
264 |
+
```
|
265 |
+
|
266 |
+
## Performance Monitoring
|
267 |
+
|
268 |
+
The system includes comprehensive performance monitoring:
|
269 |
+
|
270 |
+
- **Execution Time Tracking**: Monitor workflow execution times
|
271 |
+
- **Trade Statistics**: Track successful vs failed trades
|
272 |
+
- **Performance Metrics**: Calculate returns and drawdowns
|
273 |
+
- **Resource Usage**: Monitor memory and CPU usage
|
274 |
+
|
275 |
+
## Error Handling
|
276 |
+
|
277 |
+
The system includes robust error handling:
|
278 |
+
|
279 |
+
- **Graceful Degradation**: System continues operation despite component failures
|
280 |
+
- **Error Logging**: Comprehensive error logging with stack traces
|
281 |
+
- **Fallback Mechanisms**: Automatic fallback to synthetic data when CSV files are missing
|
282 |
+
- **Validation**: Data validation at multiple levels
|
283 |
+
|
284 |
+
## Contributing
|
285 |
+
|
286 |
+
1. Fork the repository
|
287 |
+
2. Create a feature branch
|
288 |
+
3. Add tests for new functionality
|
289 |
+
4. Ensure all tests pass
|
290 |
+
5. Submit a pull request
|
291 |
+
|
292 |
+
## License
|
293 |
+
|
294 |
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
295 |
+
|
296 |
+
## Disclaimer
|
297 |
+
|
298 |
+
This is a simulation system for educational and testing purposes. It is not intended for real trading and should not be used with real money. Always test thoroughly before using any trading system with real funds.
|
agentic_ai_system/agent_base.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import abc
|
2 |
+
import logging
|
3 |
+
from typing import Dict, Any
|
4 |
+
|
5 |
+
class Agent(abc.ABC):
    """Abstract base class for all trading agents.

    Stores the shared configuration and a per-class logger; concrete
    agents implement ``act`` to turn input data into an action dict.
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.info(f"Initialized {self.__class__.__name__}")

    @abc.abstractmethod
    def act(self, data: Any) -> Dict[str, Any]:
        """Produce an action given input data"""
        pass

    def log_action(self, action: Dict[str, Any]) -> None:
        """Log the action taken by the agent"""
        self.logger.info(f"Action taken: {action}")

    def log_error(self, error: Exception, context: str = "") -> None:
        """Log errors with context"""
        # Build the message first, then hand the traceback and the caller's
        # context to the logging machinery.
        message = f"Error in {self.__class__.__name__}: {error}"
        self.logger.error(message, exc_info=True, extra={'context': context})
|
agentic_ai_system/data_ingestion.py
ADDED
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import logging
|
3 |
+
import os
|
4 |
+
from typing import Dict, Any
|
5 |
+
from .synthetic_data_generator import SyntheticDataGenerator
|
6 |
+
|
7 |
+
logger = logging.getLogger(__name__)
|
8 |
+
|
9 |
+
def load_data(config: Dict[str, Any]) -> pd.DataFrame:
    """
    Load market data from file or generate synthetic data if needed.

    Args:
        config: Configuration dictionary

    Returns:
        DataFrame with market data
    """
    logger.info("Starting data ingestion process")

    try:
        data_type = config['data_source']['type']

        # Dispatch on the configured source type; anything else is a
        # configuration error.
        loaders = {
            'csv': _load_csv_data,
            'synthetic': _generate_synthetic_data,
        }
        if data_type not in loaders:
            raise ValueError(f"Unsupported data source type: {data_type}")
        return loaders[data_type](config)

    except Exception as e:
        logger.error(f"Error in data ingestion: {e}", exc_info=True)
        raise
|
35 |
+
|
36 |
+
def _load_csv_data(config: Dict[str, Any]) -> pd.DataFrame:
    """Load data from CSV file"""
    path = config['data_source']['path']

    # Missing file: fall back to synthetic generation rather than failing.
    if not os.path.exists(path):
        logger.warning(f"CSV file not found at {path}, generating synthetic data instead")
        return _generate_synthetic_data(config)

    logger.info(f"Loading data from CSV: {path}")
    df = pd.read_csv(path)

    # Downstream agents need the full OHLCV schema; fall back to synthetic
    # data when any required column is absent.
    required_columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
    missing_columns = [col for col in required_columns if col not in df.columns]
    if missing_columns:
        logger.warning(f"Missing columns in CSV: {missing_columns}")
        logger.info("Generating synthetic data instead")
        return _generate_synthetic_data(config)

    # Normalize the timestamp column for time-based operations downstream.
    df['timestamp'] = pd.to_datetime(df['timestamp'])

    logger.info(f"Successfully loaded {len(df)} data points from CSV")
    return df
|
61 |
+
|
62 |
+
def _generate_synthetic_data(config: Dict[str, Any]) -> pd.DataFrame:
    """Generate synthetic data using the SyntheticDataGenerator"""
    logger.info("Generating synthetic market data")

    try:
        # Make sure the target directory exists before anything is written.
        synth_cfg = config['synthetic_data']
        data_path = synth_cfg['data_path']
        os.makedirs(os.path.dirname(data_path), exist_ok=True)

        generator = SyntheticDataGenerator(config)

        # Produce a year of OHLCV bars for the configured symbol/timeframe.
        # NOTE(review): the date range is hard-coded here — confirm callers
        # never expect a different window.
        df = generator.generate_ohlcv_data(
            symbol=config['trading']['symbol'],
            start_date='2024-01-01',
            end_date='2024-12-31',
            frequency=config['trading']['timeframe'],
        )

        # Persist a CSV copy unless the config explicitly disables it.
        if synth_cfg.get('generate_data', True):
            generator.save_to_csv(df, data_path)
            logger.info(f"Saved synthetic data to {data_path}")

        return df

    except Exception as e:
        logger.error(f"Error generating synthetic data: {e}", exc_info=True)
        raise
|
92 |
+
|
93 |
+
def validate_data(df: pd.DataFrame) -> bool:
    """
    Validate the loaded data for required fields and data quality.

    Args:
        df: DataFrame to validate

    Returns:
        True if data is valid, False otherwise
    """
    # Same module logger as the rest of this file.
    log = logging.getLogger(__name__)
    log.info("Validating data quality")

    try:
        # All OHLCV columns must be present.
        required_columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            log.error(f"Missing required columns: {missing_columns}")
            return False

        # Nulls are only reported, not rejected; the checks below are the
        # hard failures.
        null_counts = df[required_columns].isnull().sum()
        if null_counts.sum() > 0:
            log.warning(f"Found null values: {null_counts.to_dict()}")

        # Prices must be non-negative.
        price_columns = ['open', 'high', 'low', 'close']
        if df[price_columns].lt(0).any().any():
            log.error("Found negative prices in data")
            return False

        # Volumes must be non-negative.
        if (df['volume'] < 0).any():
            log.error("Found negative volumes in data")
            return False

        # Every bar must satisfy low <= open/close <= high.
        invalid_ohlc = (
            (df['high'] < df['low']) |
            (df['open'] > df['high']) |
            (df['open'] < df['low']) |
            (df['close'] > df['high']) |
            (df['close'] < df['low'])
        )
        if invalid_ohlc.any():
            log.error(f"Found {invalid_ohlc.sum()} rows with invalid OHLC data")
            return False

        log.info("Data validation passed")
        return True

    except Exception as e:
        log.error(f"Error during data validation: {e}", exc_info=True)
        return False
|
agentic_ai_system/execution_agent.py
ADDED
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
import random
import time
import uuid
from typing import Dict, Any, Optional

from .agent_base import Agent
|
5 |
+
|
6 |
+
class ExecutionAgent(Agent):
    """Simulated order-execution agent.

    Validates incoming trading signals and simulates sending them to a
    (paper) broker: configurable latency, a configurable success rate,
    random price slippage, and a simple per-share commission model.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.broker_api = config['execution']['broker_api']
        self.order_size = config['execution']['order_size']
        # Optional tuning knobs with safe defaults.
        self.execution_delay = config.get('execution', {}).get('delay_ms', 100)
        self.success_rate = config.get('execution', {}).get('success_rate', 0.95)
        self.logger.info(f"Execution agent initialized with {self.broker_api} broker")

    def act(self, signal: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute trading signal by sending order to broker.

        Args:
            signal: Dictionary containing trading signal
                (must carry 'action', 'symbol', 'quantity'; 'price' optional)

        Returns:
            Dictionary containing execution result
        """
        try:
            self.logger.info(f"Processing execution signal: {signal['action']} {signal['quantity']} {signal['symbol']}")

            # Reject malformed signals before touching the (simulated) broker.
            if not self._validate_signal(signal):
                self.logger.warning("Invalid signal received, skipping execution")
                return self._generate_execution_result(signal, success=False, error="Invalid signal")

            execution_result = self._execute_order(signal)

            self.log_action(execution_result)
            return execution_result

        except Exception as e:
            self.log_error(e, "Error in order execution")
            return self._generate_execution_result(signal, success=False, error=str(e))

    def _validate_signal(self, signal: Dict[str, Any]) -> bool:
        """Validate trading signal: required fields, action, quantity, symbol."""
        try:
            required_fields = ['action', 'symbol', 'quantity']

            for field in required_fields:
                if field not in signal:
                    self.logger.error(f"Missing required field: {field}")
                    return False

            if signal['action'] not in ['buy', 'sell', 'hold']:
                self.logger.error(f"Invalid action: {signal['action']}")
                return False

            # 'hold' may carry quantity 0; buy/sell need a positive size.
            if signal['quantity'] <= 0 and signal['action'] != 'hold':
                self.logger.error(f"Invalid quantity: {signal['quantity']}")
                return False

            if not signal['symbol'] or not isinstance(signal['symbol'], str):
                self.logger.error(f"Invalid symbol: {signal['symbol']}")
                return False

            return True

        except Exception as e:
            self.log_error(e, "Error validating signal")
            return False

    def _execute_order(self, signal: Dict[str, Any]) -> Dict[str, Any]:
        """Execute order with broker simulation (latency + random success)."""
        try:
            # Simulate broker round-trip latency.
            time.sleep(self.execution_delay / 1000.0)

            # Draw success/failure from the configured fill rate.
            success = random.random() < self.success_rate

            if signal['action'] == 'hold':
                success = True  # Hold actions always succeed

            if success:
                return self._simulate_successful_execution(signal)
            return self._simulate_failed_execution(signal)

        except Exception as e:
            self.log_error(e, "Error in order execution simulation")
            return self._generate_execution_result(signal, success=False, error=str(e))

    def _simulate_successful_execution(self, signal: Dict[str, Any]) -> Dict[str, Any]:
        """Simulate successful order execution and build a filled result."""
        try:
            execution_price = signal.get('price', 0)
            if execution_price == 0:
                # No usable price on the signal: fill around the $100 default
                # with ±0.1% slippage. (Bug fix: the old fallback re-read
                # signal['price'], so an explicit price of 0 produced a fill
                # at price 0 instead of the intended default.)
                slippage = random.uniform(-0.001, 0.001)  # ±0.1% slippage
                execution_price = 100 * (1 + slippage)

            execution_time = time.time()

            # Calculate fees (simplified)
            commission = self._calculate_commission(signal)

            result = {
                'order_id': self._generate_order_id(),
                'status': 'filled',
                'action': signal['action'],
                'symbol': signal['symbol'],
                'quantity': signal['quantity'],
                'price': round(execution_price, 4),
                'execution_time': execution_time,
                'commission': commission,
                'total_value': round(signal['quantity'] * execution_price, 2),
                'success': True,
                'error': None
            }

            self.logger.info(f"Order executed successfully: {result['order_id']} - "
                             f"{result['action']} {result['quantity']} {result['symbol']} @ {result['price']}")

            return result

        except Exception as e:
            self.log_error(e, "Error in successful execution simulation")
            return self._generate_execution_result(signal, success=False, error=str(e))

    def _simulate_failed_execution(self, signal: Dict[str, Any]) -> Dict[str, Any]:
        """Simulate failed order execution with a random broker-style reason."""
        error_reasons = [
            "Insufficient funds",
            "Market closed",
            "Invalid order",
            "Network timeout",
            "Broker error"
        ]

        error_reason = random.choice(error_reasons)

        result = self._generate_execution_result(signal, success=False, error=error_reason)

        self.logger.warning(f"Order execution failed: {error_reason}")

        return result

    def _generate_execution_result(self, signal: Dict[str, Any], success: bool, error: Optional[str] = None) -> Dict[str, Any]:
        """Build a uniform execution-result dict for both outcomes."""
        return {
            'order_id': self._generate_order_id() if success else None,
            'status': 'filled' if success else 'rejected',
            'action': signal.get('action', 'unknown'),
            'symbol': signal.get('symbol', 'unknown'),
            'quantity': signal.get('quantity', 0),
            'price': signal.get('price', 0) if success else 0,  # Price is 0 for failed executions
            'execution_time': time.time(),
            'commission': 0,
            'total_value': 0,
            'success': success,
            'error': error
        }

    def _calculate_commission(self, signal: Dict[str, Any]) -> float:
        """Calculate commission: $1 base plus $0.01 per share (0 for holds)."""
        try:
            base_commission = 1.0  # $1 base commission
            per_share_commission = 0.01  # $0.01 per share

            if signal['action'] == 'hold':
                return 0.0

            commission = base_commission + (signal['quantity'] * per_share_commission)
            return round(commission, 2)

        except Exception as e:
            self.log_error(e, "Error calculating commission")
            return 0.0

    def _generate_order_id(self) -> str:
        """Generate unique order ID of the form ORD_XXXXXXXX."""
        return f"ORD_{uuid.uuid4().hex[:8].upper()}"

    def get_execution_statistics(self) -> Dict[str, Any]:
        """Get execution statistics"""
        # This would typically track real execution statistics
        # For now, return placeholder data
        return {
            'total_orders': 0,
            'successful_orders': 0,
            'failed_orders': 0,
            'success_rate': 0.0,
            'average_execution_time': 0.0,
            'total_commission': 0.0
        }
|
agentic_ai_system/logger_config.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import logging.handlers
|
3 |
+
import os
|
4 |
+
from datetime import datetime
|
5 |
+
from typing import Dict, Optional
|
6 |
+
|
7 |
+
def _add_rotating_handler(target: logging.Logger, path: str, max_bytes: int,
                          backup_count: int, level: int,
                          formatter: logging.Formatter) -> None:
    """Attach a size-rotating file handler with the given level/format to *target*."""
    handler = logging.handlers.RotatingFileHandler(
        path,
        maxBytes=max_bytes,
        backupCount=backup_count
    )
    handler.setLevel(level)
    handler.setFormatter(formatter)
    target.addHandler(handler)


def setup_logging(config: Dict, log_level: str = 'INFO') -> None:
    """
    Set up comprehensive logging for the trading system.

    Creates the log directory, installs a console handler plus rotating
    all-logs and error-only file handlers on the root logger, and configures
    dedicated non-propagating 'trading' and 'performance' loggers with their
    own rotating files.

    Args:
        config: Configuration dictionary (reads ``logging.log_dir``, default 'logs')
        log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
    """
    # Create logs directory if it doesn't exist
    log_dir = config.get('logging', {}).get('log_dir', 'logs')
    os.makedirs(log_dir, exist_ok=True)

    # Configure root logger; clear stale handlers so re-initialization is safe.
    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, log_level.upper()))
    root_logger.handlers.clear()

    detailed_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'
    )
    simple_formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s'
    )

    # Console: INFO+ with the short format.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(simple_formatter)
    root_logger.addHandler(console_handler)

    # Root file handlers: everything (DEBUG+) and errors only (ERROR+).
    _add_rotating_handler(root_logger, os.path.join(log_dir, 'trading_system.log'),
                          10 * 1024 * 1024, 5, logging.DEBUG, detailed_formatter)
    _add_rotating_handler(root_logger, os.path.join(log_dir, 'errors.log'),
                          5 * 1024 * 1024, 3, logging.ERROR, detailed_formatter)

    # Dedicated 'trading' logger.
    # Bug fix: clear its handlers first — previously only the ROOT logger was
    # cleared, so every repeated setup_logging() call stacked a duplicate
    # rotating handler (and open file) onto this named logger.
    trading_logger = logging.getLogger('trading')
    trading_logger.handlers.clear()
    _add_rotating_handler(trading_logger, os.path.join(log_dir, 'trading.log'),
                          10 * 1024 * 1024, 5, logging.INFO, detailed_formatter)
    trading_logger.setLevel(logging.INFO)
    trading_logger.propagate = False  # keep trade records out of the root files

    # Dedicated 'performance' logger (same duplicate-handler fix applies).
    performance_logger = logging.getLogger('performance')
    performance_logger.handlers.clear()
    _add_rotating_handler(performance_logger, os.path.join(log_dir, 'performance.log'),
                          5 * 1024 * 1024, 3, logging.INFO, detailed_formatter)
    performance_logger.setLevel(logging.INFO)
    performance_logger.propagate = False

    logging.info(f"Logging system initialized. Log files in: {log_dir}")
|
96 |
+
|
97 |
+
def get_logger(name: str) -> logging.Logger:
    """Fetch (or lazily create) the logger registered under *name*.

    Args:
        name: Logger name (usually ``__name__``)

    Returns:
        The shared :class:`logging.Logger` instance for that name.
    """
    return logging.getLogger(name)
|
108 |
+
|
109 |
+
def log_trade(logger: logging.Logger, trade_data: Dict) -> None:
    """
    Log trade execution details.

    Args:
        logger: Logger instance
        trade_data: Dictionary containing trade information
    """
    # Lazy %-formatting: the message is only rendered if a handler emits the
    # record, avoiding needless str(dict) work when the level is filtered out.
    logger.info("TRADE EXECUTED: %s", trade_data)
|
118 |
+
|
119 |
+
def log_performance(logger: logging.Logger, performance_data: Dict) -> None:
    """
    Log performance metrics.

    Args:
        logger: Logger instance. NOTE(review): currently UNUSED — metrics are
            routed to the dedicated 'performance' logger configured in
            setup_logging(); confirm whether the parameter should be honored
            or removed.
        performance_data: Dictionary containing performance metrics
    """
    # Always routes to the non-propagating 'performance' logger so metrics
    # land in performance.log regardless of the caller-supplied logger.
    perf_logger = logging.getLogger('performance')
    perf_logger.info(f"PERFORMANCE: {performance_data}")
|
129 |
+
|
130 |
+
def log_error(logger: logging.Logger, error: Exception, context: Optional[str] = None) -> None:
    """Log *error* (with traceback) at ERROR level, plus optional context.

    Args:
        logger: Logger instance
        error: Exception that occurred
        context: Additional context information, appended when provided
    """
    parts = [f"ERROR: {type(error).__name__}: {str(error)}"]
    if context:
        parts.append(f"Context: {context}")
    # exc_info=True attaches the active traceback to the record.
    logger.error(" | ".join(parts), exc_info=True)
|
agentic_ai_system/main.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import yaml
|
2 |
+
import logging
|
3 |
+
import sys
|
4 |
+
from typing import Dict, Any
|
5 |
+
from .orchestrator import run, run_backtest, run_live_trading
|
6 |
+
from .logger_config import setup_logging
|
7 |
+
|
8 |
+
def main():
    """Entry point: load config, wire up logging, then run one trading cycle."""
    try:
        config = load_config()

        setup_logging(config)
        logger = logging.getLogger(__name__)
        logger.info("Starting algorithmic trading system")

        result = run(config)

        # Report outcome; a failed workflow surfaces its accumulated errors.
        if not result['success']:
            logger.error(f"Trading workflow failed: {result['errors']}")
        else:
            logger.info("Trading workflow completed successfully")
            if result['order_executed']:
                logger.info(f"Order executed: {result['execution_result']}")

    except Exception as e:
        # Logging may not be configured yet, so fall back to stdout.
        print(f"Fatal error: {e}")
        sys.exit(1)
|
33 |
+
|
34 |
+
def load_config(config_path: str = 'config.yaml') -> Dict[str, Any]:
    """Load and parse the YAML configuration file, exiting the process on failure.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        Parsed configuration dictionary.
    """
    try:
        with open(config_path, 'r') as f:
            return yaml.safe_load(f)
    except FileNotFoundError:
        print(f"Configuration file not found: {config_path}")
        sys.exit(1)
    except yaml.YAMLError as e:
        print(f"Error parsing configuration file: {e}")
        sys.exit(1)
|
46 |
+
|
47 |
+
def run_backtest_mode(config_path: str = 'config.yaml',
                      start_date: str = '2024-01-01',
                      end_date: str = '2024-12-31'):
    """Run the system in backtest mode over the given date window."""
    try:
        config = load_config(config_path)
        setup_logging(config)
        logger = logging.getLogger(__name__)

        logger.info("Running in backtest mode")
        result = run_backtest(config, start_date, end_date)

        # Summarize the outcome either way.
        if not result['success']:
            logger.error(f"Backtest failed: {result['error']}")
        else:
            logger.info(f"Backtest completed: {result['total_return']:.2%} return")
            logger.info(f"Total trades: {result['total_trades']}")

    except Exception as e:
        print(f"Backtest error: {e}")
        sys.exit(1)
|
68 |
+
|
69 |
+
def run_live_mode(config_path: str = 'config.yaml', duration_minutes: int = 60):
    """Run the system in live trading mode for *duration_minutes*."""
    try:
        config = load_config(config_path)
        setup_logging(config)
        logger = logging.getLogger(__name__)

        logger.info("Running in live trading mode")
        result = run_live_trading(config, duration_minutes)

        if not result['success']:
            logger.error(f"Live trading failed: {result['error']}")
        else:
            logger.info(f"Live trading completed: {result['total_trades']} trades")

    except Exception as e:
        print(f"Live trading error: {e}")
        sys.exit(1)
|
87 |
+
|
88 |
+
if __name__ == '__main__':
    import argparse

    # CLI dispatcher: choose a single standard run (default), a historical
    # backtest, or a timed live-trading session.
    parser = argparse.ArgumentParser(description='Algorithmic Trading System')
    parser.add_argument('--mode', choices=['standard', 'backtest', 'live'],
                        default='standard', help='Run mode')
    parser.add_argument('--config', default='config.yaml', help='Configuration file path')
    # --start-date/--end-date are only consulted in backtest mode;
    # --duration only in live mode.
    parser.add_argument('--start-date', default='2024-01-01', help='Backtest start date')
    parser.add_argument('--end-date', default='2024-12-31', help='Backtest end date')
    parser.add_argument('--duration', type=int, default=60, help='Live trading duration (minutes)')

    args = parser.parse_args()

    if args.mode == 'backtest':
        run_backtest_mode(args.config, args.start_date, args.end_date)
    elif args.mode == 'live':
        run_live_mode(args.config, args.duration)
    else:
        main()
|
agentic_ai_system/orchestrator.py
ADDED
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import time
|
3 |
+
import pandas as pd
|
4 |
+
from typing import Dict, Any, Optional
|
5 |
+
from .data_ingestion import load_data, validate_data
|
6 |
+
from .strategy_agent import StrategyAgent
|
7 |
+
from .execution_agent import ExecutionAgent
|
8 |
+
|
9 |
+
logger = logging.getLogger(__name__)
|
10 |
+
|
11 |
+
def run(config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Main orchestration function that coordinates the trading workflow:
    load data -> generate a signal -> execute the order.

    Args:
        config: Configuration dictionary

    Returns:
        Dictionary containing execution results and statistics:
        success / data_loaded / signal_generated / order_executed flags,
        the raw execution_result, accumulated error strings, and wall-clock
        execution_time in seconds.
    """
    start_time = time.time()
    logger.info("Starting trading system orchestration")

    try:
        # Initialize workflow results
        workflow_result = {
            'success': False,
            'data_loaded': False,
            'signal_generated': False,
            'order_executed': False,
            'execution_result': None,
            'errors': [],
            'execution_time': 0
        }

        # Step 1: Load market data
        logger.info("Step 1: Loading market data")
        data = load_data(config)

        if data is not None and not data.empty:
            workflow_result['data_loaded'] = True
            logger.info(f"Successfully loaded {len(data)} data points")

            # Validate data quality — a failed validation is deliberately
            # non-fatal; the workflow continues with the data as-is.
            if validate_data(data):
                logger.info("Data validation passed")
            else:
                logger.warning("Data validation failed, but continuing with workflow")
        else:
            # No data means nothing downstream can run — abort early.
            logger.error("Failed to load market data")
            workflow_result['errors'].append("Failed to load market data")
            return workflow_result

        # Step 2: Generate trading signal
        logger.info("Step 2: Generating trading signal")
        strategy_agent = StrategyAgent(config)
        signal = strategy_agent.act(data)

        if signal and signal.get('action') != 'hold':
            workflow_result['signal_generated'] = True
            logger.info(f"Generated signal: {signal['action']} {signal['quantity']} {signal['symbol']}")
        else:
            logger.info("No actionable signal generated (hold)")
            workflow_result['signal_generated'] = True  # Hold is still a valid signal

        # Step 3: Execute order (the execution agent decides what a 'hold'
        # signal means; it is still passed through).
        logger.info("Step 3: Executing order")
        execution_agent = ExecutionAgent(config)
        execution_result = execution_agent.act(signal)

        if execution_result['success']:
            workflow_result['order_executed'] = True
            workflow_result['execution_result'] = execution_result
            logger.info("Order executed successfully")
        else:
            logger.error(f"Order execution failed: {execution_result.get('error', 'Unknown error')}")
            workflow_result['errors'].append(f"Order execution failed: {execution_result.get('error')}")

        # Calculate execution time.
        # NOTE(review): 'success' intentionally tracks only data+signal stages;
        # a failed execution still yields success=True (errors list records it).
        workflow_result['execution_time'] = time.time() - start_time
        workflow_result['success'] = workflow_result['data_loaded'] and workflow_result['signal_generated']

        logger.info(f"Trading workflow completed in {workflow_result['execution_time']:.2f} seconds")

        return workflow_result

    except Exception as e:
        # Any unexpected failure collapses to a fresh all-False result
        # carrying the exception text.
        logger.error(f"Error in trading workflow: {e}", exc_info=True)
        workflow_result = {
            'success': False,
            'data_loaded': False,
            'signal_generated': False,
            'order_executed': False,
            'execution_result': None,
            'errors': [str(e)],
            'execution_time': time.time() - start_time
        }
        return workflow_result
|
99 |
+
|
100 |
+
def run_backtest(config: Dict[str, Any], start_date: str = '2024-01-01', end_date: str = '2024-12-31') -> Dict[str, Any]:
    """
    Run backtesting simulation over historical data.

    Walks the filtered data bar-by-bar with an expanding window, feeding the
    strategy agent and executing non-hold signals, then marks open positions
    to the final close.

    Args:
        config: Configuration dictionary
        start_date: Start date for backtest (inclusive)
        end_date: End date for backtest (inclusive)

    Returns:
        Dictionary containing backtest results (final value, total return,
        trade log, open positions) or {'success': False, 'error': ...}.
    """
    logger.info(f"Starting backtest from {start_date} to {end_date}")

    try:
        # Load historical data
        data = load_data(config)

        if data is None or data.empty:
            logger.error("No data available for backtest")
            return {'success': False, 'error': 'No data available'}

        # Filter data for backtest period.
        # NOTE(review): assumes load_data returns a 'timestamp' column — confirm.
        data['timestamp'] = pd.to_datetime(data['timestamp'])
        mask = (data['timestamp'] >= start_date) & (data['timestamp'] <= end_date)
        backtest_data = data.loc[mask]

        if backtest_data.empty:
            logger.error("No data available for specified backtest period")
            return {'success': False, 'error': 'No data for backtest period'}

        logger.info(f"Running backtest on {len(backtest_data)} data points")

        # Initialize agents
        strategy_agent = StrategyAgent(config)
        execution_agent = ExecutionAgent(config)

        # Track backtest results
        trades = []
        portfolio_value = config['trading']['capital']
        positions = {}

        # Run simulation: expanding window so the strategy only sees the past.
        for i in range(len(backtest_data)):
            current_data = backtest_data.iloc[:i+1]

            if len(current_data) < 50:  # Need minimum data for indicators
                continue

            # Generate signal
            signal = strategy_agent.act(current_data)

            # Execute if not hold
            if signal['action'] != 'hold':
                execution_result = execution_agent.act(signal)
                trades.append({
                    'timestamp': current_data.index[-1],
                    'signal': signal,
                    'execution': execution_result
                })

                # Update portfolio (simplified cash accounting)
                if execution_result['success']:
                    symbol = signal['symbol']
                    if signal['action'] == 'buy':
                        positions[symbol] = positions.get(symbol, 0) + signal['quantity']
                        portfolio_value -= execution_result['total_value']
                    elif signal['action'] == 'sell':
                        positions[symbol] = positions.get(symbol, 0) - signal['quantity']
                        portfolio_value += execution_result['total_value']

        # Mark remaining positions to the final close.
        # Bug fix: net SHORT positions (quantity < 0) were previously skipped,
        # which overstated final value by ignoring the buy-back liability.
        # Any nonzero position is now valued at the last close (shorts subtract).
        final_value = portfolio_value
        if positions:
            final_price = backtest_data['close'].iloc[-1]
            for symbol, quantity in positions.items():
                if quantity != 0:
                    final_value += quantity * final_price

        # Calculate performance metrics
        total_return = (final_value - config['trading']['capital']) / config['trading']['capital']

        backtest_results = {
            'success': True,
            'start_date': start_date,
            'end_date': end_date,
            'initial_capital': config['trading']['capital'],
            'final_value': final_value,
            'total_return': total_return,
            'total_trades': len(trades),
            'trades': trades,
            'positions': positions
        }

        logger.info(f"Backtest completed: {total_return:.2%} return over {len(trades)} trades")
        return backtest_results

    except Exception as e:
        logger.error(f"Error in backtest: {e}", exc_info=True)
        return {'success': False, 'error': str(e)}
|
199 |
+
|
200 |
+
def run_live_trading(config: Dict[str, Any], duration_minutes: int = 60) -> Dict[str, Any]:
    """
    Run live trading simulation for a specified duration.

    Repeats the full workflow (run) once per minute until the deadline,
    collecting every successfully executed order.

    Args:
        config: Configuration dictionary
        duration_minutes: Duration to run live trading in minutes

    Returns:
        Dictionary containing live trading results
    """
    logger.info(f"Starting live trading simulation for {duration_minutes} minutes")

    try:
        import time
        from datetime import datetime, timedelta

        deadline = datetime.now() + timedelta(minutes=duration_minutes)
        executed = []

        # One full trading cycle, then a one-minute pause, until time is up.
        while datetime.now() < deadline:
            cycle = run(config)

            if cycle['order_executed'] and cycle['execution_result']['success']:
                executed.append(cycle['execution_result'])

            time.sleep(60)

        live_results = {
            'success': True,
            'duration_minutes': duration_minutes,
            'total_trades': len(executed),
            'trades': executed,
            'start_time': datetime.now() - timedelta(minutes=duration_minutes),
            'end_time': datetime.now()
        }

        logger.info(f"Live trading completed: {len(executed)} trades executed")
        return live_results

    except Exception as e:
        logger.error(f"Error in live trading: {e}", exc_info=True)
        return {'success': False, 'error': str(e)}
|
agentic_ai_system/strategy_agent.py
ADDED
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import numpy as np
|
3 |
+
import logging
|
4 |
+
from typing import Dict, Any, Optional
|
5 |
+
from .agent_base import Agent
|
6 |
+
|
7 |
+
class StrategyAgent(Agent):
    """Signal-generating agent.

    Computes technical indicators (SMA, RSI, Bollinger Bands, MACD) over
    OHLCV data and emits buy/sell/hold signals with simple risk-capped
    position sizing.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        # Trading universe and sizing limits come straight from config.
        self.symbol = config['trading']['symbol']
        self.capital = config['trading']['capital']
        self.max_position = config['risk']['max_position']
        # NOTE(review): max_drawdown is stored but not referenced by any
        # method below — confirm whether drawdown enforcement is pending.
        self.max_drawdown = config['risk']['max_drawdown']
        self.logger.info(f"Strategy agent initialized for {self.symbol} with capital {self.capital}")

    def act(self, data: pd.DataFrame) -> Dict[str, Any]:
        """
        Analyze market data and generate trading signals.

        Args:
            data: DataFrame with OHLCV market data (must contain 'close'
                and 'volume' columns)

        Returns:
            Dictionary containing the trading signal; on any failure a
            safe 'hold' signal is returned instead of raising.
        """
        try:
            self.logger.info(f"Analyzing {len(data)} data points for {self.symbol}")

            # Validate data
            if data.empty:
                self.logger.warning("Empty data received")
                return self._generate_no_action_signal()

            # Calculate technical indicators
            indicators = self._calculate_indicators(data)

            # Generate trading signal
            signal = self._generate_signal(data, indicators)

            # Log the signal
            self.log_action(signal)

            return signal

        except Exception as e:
            self.log_error(e, "Error in strategy analysis")
            return self._generate_no_action_signal()

    def _calculate_indicators(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Calculate technical indicators from market data.

        Returns an empty dict on failure, which downstream code treats
        as "no actionable information".
        """
        try:
            close_prices = data['close'].values

            # Simple Moving Averages
            sma_20 = self._calculate_sma(close_prices, 20)
            sma_50 = self._calculate_sma(close_prices, 50)

            # RSI
            rsi = self._calculate_rsi(close_prices, 14)

            # Bollinger Bands
            bb_upper, bb_lower = self._calculate_bollinger_bands(close_prices, 20, 2)

            # MACD
            macd, signal_line = self._calculate_macd(close_prices)

            indicators = {
                'sma_20': sma_20,
                'sma_50': sma_50,
                'rsi': rsi,
                'bb_upper': bb_upper,
                'bb_lower': bb_lower,
                'macd': macd,
                'macd_signal': signal_line
            }

            self.logger.debug(f"Calculated indicators: {list(indicators.keys())}")
            return indicators

        except Exception as e:
            self.log_error(e, "Error calculating indicators")
            return {}

    def _generate_signal(self, data: pd.DataFrame, indicators: Dict[str, Any]) -> Dict[str, Any]:
        """Generate trading signal based on indicators.

        Strategy: buy when price > SMA20 and RSI < 70; sell when price <
        SMA20 or RSI > 80; otherwise hold. Missing indicator values fall
        back to neutral defaults (current price / RSI 50).
        """
        try:
            if not indicators:
                return self._generate_no_action_signal()

            current_price = data['close'].iloc[-1]
            # NOTE(review): current_volume is computed but unused below.
            current_volume = data['volume'].iloc[-1]

            # Get latest indicator values; neutral fallbacks when a series
            # is empty (not enough history for that indicator's window).
            sma_20 = indicators['sma_20'][-1] if len(indicators['sma_20']) > 0 else current_price
            sma_50 = indicators['sma_50'][-1] if len(indicators['sma_50']) > 0 else current_price
            rsi = indicators['rsi'][-1] if len(indicators['rsi']) > 0 else 50
            bb_upper = indicators['bb_upper'][-1] if len(indicators['bb_upper']) > 0 else current_price * 1.02
            bb_lower = indicators['bb_lower'][-1] if len(indicators['bb_lower']) > 0 else current_price * 0.98

            # Simple strategy: Buy when price is above SMA20 and RSI < 70
            # Sell when price is below SMA20 or RSI > 80
            action = 'hold'
            quantity = 0
            confidence = 0.5

            if current_price > sma_20 and rsi < 70:
                action = 'buy'
                quantity = self._calculate_position_size(current_price)
                confidence = 0.7
                self.logger.info(f"BUY signal: Price {current_price} > SMA20 {sma_20}, RSI {rsi}")

            elif current_price < sma_20 or rsi > 80:
                action = 'sell'
                quantity = self._calculate_position_size(current_price)
                confidence = 0.6
                # NOTE(review): message always prints the price/SMA comparison
                # even when this branch was entered via RSI > 80 alone.
                self.logger.info(f"SELL signal: Price {current_price} < SMA20 {sma_20}, RSI {rsi}")

            return {
                'action': action,
                'symbol': self.symbol,
                'quantity': quantity,
                'price': current_price,
                'confidence': confidence,
                'timestamp': data.index[-1] if hasattr(data.index[-1], 'timestamp') else None,
                'indicators': {
                    'sma_20': sma_20,
                    'sma_50': sma_50,
                    'rsi': rsi,
                    'bb_upper': bb_upper,
                    'bb_lower': bb_lower
                }
            }

        except Exception as e:
            self.log_error(e, "Error generating signal")
            return self._generate_no_action_signal()

    def _calculate_position_size(self, price: float) -> int:
        """Calculate position size based on risk management rules.

        Uses 10% of capital per trade, capped at max_position, with a
        floor of 1 share; returns 1 on any error.
        """
        try:
            # Simple position sizing: use 10% of capital per trade
            position_value = self.capital * 0.1
            quantity = int(position_value / price)

            # Apply max position limit
            quantity = min(quantity, self.max_position)

            # Ensure minimum quantity
            if quantity < 1:
                quantity = 1

            return quantity

        except Exception as e:
            self.log_error(e, "Error calculating position size")
            return 1

    def _generate_no_action_signal(self) -> Dict[str, Any]:
        """Generate a safe 'hold' signal (used for empty data and errors)."""
        return {
            'action': 'hold',
            'symbol': self.symbol,
            'quantity': 0,
            'price': 0,
            'confidence': 0.0,
            'timestamp': None,
            'indicators': {}
        }

    # Technical indicator calculations.
    # All helpers return empty arrays when there is insufficient history.

    def _calculate_sma(self, prices: np.ndarray, window: int) -> np.ndarray:
        """Calculate Simple Moving Average (length: len(prices)-window+1)."""
        if len(prices) < window:
            return np.array([])
        return np.convolve(prices, np.ones(window)/window, mode='valid')

    def _calculate_rsi(self, prices: np.ndarray, window: int = 14) -> np.ndarray:
        """Calculate Relative Strength Index.

        Uses simple (convolution) averages of gains/losses rather than
        Wilder's exponential smoothing.
        """
        if len(prices) < window + 1:
            return np.array([])

        deltas = np.diff(prices)
        gains = np.where(deltas > 0, deltas, 0)
        losses = np.where(deltas < 0, -deltas, 0)

        avg_gains = np.convolve(gains, np.ones(window)/window, mode='valid')
        avg_losses = np.convolve(losses, np.ones(window)/window, mode='valid')

        rs = avg_gains / (avg_losses + 1e-10)  # Avoid division by zero
        rsi = 100 - (100 / (1 + rs))

        return rsi

    def _calculate_bollinger_bands(self, prices: np.ndarray, window: int = 20, std_dev: float = 2) -> tuple:
        """Calculate Bollinger Bands (upper, lower) around the SMA."""
        if len(prices) < window:
            return np.array([]), np.array([])

        sma = self._calculate_sma(prices, window)
        if len(sma) == 0:
            return np.array([]), np.array([])

        # Calculate rolling standard deviation (same alignment as the SMA)
        std = np.array([np.std(prices[i:i+window]) for i in range(len(prices) - window + 1)])

        upper_band = sma + (std_dev * std)
        lower_band = sma - (std_dev * std)

        return upper_band, lower_band

    def _calculate_macd(self, prices: np.ndarray, fast: int = 12, slow: int = 26, signal: int = 9) -> tuple:
        """Calculate MACD (Moving Average Convergence Divergence).

        Returns (macd_line, signal_line). Note the two arrays may differ in
        length: the signal line is an EMA of the MACD line and is shorter.
        """
        if len(prices) < slow:
            return np.array([]), np.array([])

        ema_fast = self._calculate_ema(prices, fast)
        ema_slow = self._calculate_ema(prices, slow)

        if len(ema_fast) == 0 or len(ema_slow) == 0:
            return np.array([]), np.array([])

        # Align lengths (keep the most recent overlapping values)
        min_len = min(len(ema_fast), len(ema_slow))
        ema_fast = ema_fast[-min_len:]
        ema_slow = ema_slow[-min_len:]

        macd_line = ema_fast - ema_slow
        signal_line = self._calculate_ema(macd_line, signal)

        return macd_line, signal_line

    def _calculate_ema(self, prices: np.ndarray, window: int) -> np.ndarray:
        """Calculate Exponential Moving Average.

        Seeded from prices[0]; the first window-1 values are dropped so the
        returned array aligns with the SMA convention.
        """
        if len(prices) < window:
            return np.array([])

        alpha = 2 / (window + 1)
        ema = np.zeros(len(prices))
        ema[0] = prices[0]

        for i in range(1, len(prices)):
            ema[i] = alpha * prices[i] + (1 - alpha) * ema[i-1]

        return ema[window-1:]  # Return only the valid EMA values
|
agentic_ai_system/synthetic_data_generator.py
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import numpy as np
|
3 |
+
from datetime import datetime, timedelta
|
4 |
+
import logging
|
5 |
+
from typing import Dict, List, Optional
|
6 |
+
|
7 |
+
logger = logging.getLogger(__name__)
|
8 |
+
|
9 |
+
class SyntheticDataGenerator:
    """
    Generates synthetic market data for testing and development purposes.

    OHLCV bars follow a per-bar random walk with configurable drift
    (``trend``), volatility and noise level; tick data is derived from a
    geometric Brownian motion price path.
    """

    # Class-level logger keeps the class self-contained; the name matches
    # this module so log routing is unchanged.
    _logger = logging.getLogger(__name__)

    # Accepted frequency aliases -> pandas offset strings.  Lowercase
    # offsets are used internally because uppercase 'H' is deprecated in
    # pandas >= 2.2; both spellings remain accepted for callers.
    _FREQ_ALIASES = {
        '1min': 'min', '1m': 'min',
        '5min': '5min', '5m': '5min',
        '1H': 'h', '1h': 'h',
        '1D': 'D', '1d': 'D',
    }

    def __init__(self, config: Dict):
        """
        Args:
            config: System configuration.  Parameters are read from the
                optional 'synthetic_data' section: base_price (default
                100.0), volatility (0.02), trend (0.001, applied as a
                per-bar drift) and noise_level (0.005).
        """
        self.config = config
        synth_cfg = config.get('synthetic_data', {})
        self.base_price = synth_cfg.get('base_price', 100.0)
        self.volatility = synth_cfg.get('volatility', 0.02)
        self.trend = synth_cfg.get('trend', 0.001)
        self.noise_level = synth_cfg.get('noise_level', 0.005)

        self._logger.info(
            "Initialized SyntheticDataGenerator with base_price=%s, volatility=%s, trend=%s",
            self.base_price, self.volatility, self.trend)

    def generate_ohlcv_data(self,
                            symbol: str = 'AAPL',
                            start_date: str = '2024-01-01',
                            end_date: str = '2024-12-31',
                            frequency: str = '1min') -> pd.DataFrame:
        """
        Generate synthetic OHLCV (Open, High, Low, Close, Volume) data.

        Args:
            symbol: Stock symbol
            start_date: Start date in YYYY-MM-DD format
            end_date: End date in YYYY-MM-DD format
            frequency: Data frequency ('1min', '5min', '1H', '1D')

        Returns:
            DataFrame with columns timestamp, symbol, open, high, low,
            close, volume.

        Raises:
            ValueError: If the frequency alias is not supported.
        """
        self._logger.info("Generating synthetic OHLCV data for %s from %s to %s",
                          symbol, start_date, end_date)

        pandas_freq = self._FREQ_ALIASES.get(frequency)
        if pandas_freq is None:
            raise ValueError(f"Unsupported frequency: {frequency}")

        timestamps = pd.date_range(start=pd.to_datetime(start_date),
                                   end=pd.to_datetime(end_date),
                                   freq=pandas_freq)

        rows = []
        current_price = self.base_price

        for timestamp in timestamps:
            # Open wiggles around the running price; close applies the
            # configured per-bar drift (trend) plus Gaussian volatility.
            # (Previously the trend was computed but never applied, so the
            # 'trending' scenario had no effect on the generated bars.)
            noise = np.random.normal(0, self.noise_level)
            open_price = current_price * (1 + noise)
            close_price = open_price * (1 + self.trend + np.random.normal(0, self.volatility))

            # High/low bracket open/close with a random intrabar range, so
            # high >= max(open, close) and low <= min(open, close) always hold.
            price_range = abs(close_price - open_price) * np.random.uniform(1.5, 3.0)
            high_price = max(open_price, close_price) + price_range * np.random.uniform(0, 0.5)
            low_price = min(open_price, close_price) - price_range * np.random.uniform(0, 0.5)

            # Volume scales with the relative size of the bar's price move.
            volume = np.random.randint(1000, 100000) * (1 + abs(close_price - open_price) / open_price)

            rows.append({
                'timestamp': timestamp,
                'symbol': symbol,
                'open': round(open_price, 2),
                'high': round(high_price, 2),
                'low': round(low_price, 2),
                'close': round(close_price, 2),
                'volume': int(volume)
            })

            current_price = close_price

        df = pd.DataFrame(rows)
        self._logger.info("Generated %d data points for %s", len(df), symbol)
        return df

    def generate_tick_data(self,
                           symbol: str = 'AAPL',
                           duration_minutes: int = 60,
                           tick_interval_ms: int = 1000) -> pd.DataFrame:
        """
        Generate high-frequency tick data for testing.

        Args:
            symbol: Stock symbol
            duration_minutes: Duration in minutes
            tick_interval_ms: Interval between ticks in milliseconds

        Returns:
            DataFrame with columns timestamp, symbol, price, volume.
        """
        self._logger.info("Generating tick data for %s for %d minutes",
                          symbol, duration_minutes)

        num_ticks = (duration_minutes * 60 * 1000) // tick_interval_ms
        timestamps = pd.date_range(
            start=datetime.now(),
            periods=num_ticks,
            freq=f'{tick_interval_ms}ms'
        )

        # GBM base path with doubled volatility, then per-tick micro-noise.
        base_prices = self._generate_price_series(num_ticks, volatility=self.volatility * 2)

        rows = [
            {
                'timestamp': ts,
                'symbol': symbol,
                'price': round(base * (1 + np.random.normal(0, self.noise_level * 0.5)), 4),
                'volume': np.random.randint(1, 100),
            }
            for ts, base in zip(timestamps, base_prices)
        ]

        df = pd.DataFrame(rows)
        self._logger.info("Generated %d tick data points for %s", len(df), symbol)
        return df

    def _generate_price_series(self, length: int, volatility: Optional[float] = None) -> np.ndarray:
        """
        Generate a realistic price series using geometric Brownian motion.

        Args:
            length: Number of price points (0 or negative yields an empty array)
            volatility: Price volatility (if None, uses self.volatility)

        Returns:
            Array of `length` prices starting near self.base_price.
        """
        if length <= 0:
            # Guard: dt = 1/length below would divide by zero.
            return np.empty(0)

        sigma = self.volatility if volatility is None else volatility
        mu = self.trend  # drift

        # Time grid on [0, 1] and the corresponding Brownian increments.
        dt = 1.0 / length
        t = np.linspace(0, 1, length)
        dW = np.random.normal(0, np.sqrt(dt), length)
        W = np.cumsum(dW)

        # Closed-form GBM path: S_t = S_0 * exp((mu - sigma^2 / 2) t + sigma W_t)
        return self.base_price * np.exp((mu - 0.5 * sigma ** 2) * t + sigma * W)

    def save_to_csv(self, df: pd.DataFrame, filepath: str) -> None:
        """
        Save generated data to CSV file.

        Args:
            df: DataFrame to save
            filepath: Path to save the CSV file
        """
        df.to_csv(filepath, index=False)
        self._logger.info("Saved synthetic data to %s", filepath)

    def generate_market_scenarios(self, scenario_type: str = 'normal', **kwargs) -> pd.DataFrame:
        """
        Generate data for different market scenarios.

        Args:
            scenario_type: Type of scenario ('normal', 'volatile',
                'trending', 'crash')
            **kwargs: Forwarded to generate_ohlcv_data (symbol, start_date,
                end_date, frequency), so callers can control the data range.

        Returns:
            DataFrame with scenario-specific OHLCV data.

        Raises:
            ValueError: If scenario_type is unknown.
        """
        self._logger.info("Generating %s market scenario", scenario_type)

        if scenario_type not in ('normal', 'volatile', 'trending', 'crash'):
            raise ValueError(f"Unknown scenario type: {scenario_type}")

        # Save and restore state by assignment inside try/finally so a
        # failure during generation can never leave the generator with
        # scenario-skewed parameters, and so restoration is exact (the
        # previous multiply-then-divide restore could drift and skipped
        # restoration entirely if generation raised).
        original_volatility = self.volatility
        original_trend = self.trend
        try:
            if scenario_type == 'volatile':
                self.volatility *= 3            # High volatility scenario
            elif scenario_type == 'trending':
                self.trend *= 5                 # Strong upward trend
            elif scenario_type == 'crash':
                self.volatility *= 5            # Market crash scenario
                self.trend = -0.01              # Strong downward trend
            return self.generate_ohlcv_data(**kwargs)
        finally:
            self.volatility = original_volatility
            self.trend = original_trend
|
config.yaml
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Configuration file for the agentic AI trading system
|
2 |
+
data_source:
|
3 |
+
type: 'csv'
|
4 |
+
path: 'data/market_data.csv'
|
5 |
+
|
6 |
+
trading:
|
7 |
+
symbol: 'AAPL'
|
8 |
+
timeframe: '1m'
|
9 |
+
capital: 100000
|
10 |
+
|
11 |
+
risk:
|
12 |
+
max_position: 100
|
13 |
+
max_drawdown: 0.05
|
14 |
+
|
15 |
+
execution:
|
16 |
+
broker_api: 'paper'
|
17 |
+
order_size: 10
|
18 |
+
|
19 |
+
# Synthetic data generation settings
|
20 |
+
synthetic_data:
|
21 |
+
base_price: 150.0
|
22 |
+
volatility: 0.02
|
23 |
+
trend: 0.001
|
24 |
+
noise_level: 0.005
|
25 |
+
generate_data: true
|
26 |
+
data_path: 'data/synthetic_market_data.csv'
|
27 |
+
|
28 |
+
# Logging configuration
|
29 |
+
logging:
|
30 |
+
log_level: 'INFO'
|
31 |
+
log_dir: 'logs'
|
32 |
+
enable_console: true
|
33 |
+
enable_file: true
|
34 |
+
max_file_size_mb: 10
|
35 |
+
backup_count: 5
|
demo.py
ADDED
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Demonstration script for the Algorithmic Trading System
|
4 |
+
|
5 |
+
This script demonstrates the key features of the system:
|
6 |
+
- Synthetic data generation
|
7 |
+
- Trading workflow execution
|
8 |
+
- Backtesting
|
9 |
+
- Logging
|
10 |
+
"""
|
11 |
+
|
12 |
+
import yaml
|
13 |
+
import pandas as pd
|
14 |
+
from agentic_ai_system.synthetic_data_generator import SyntheticDataGenerator
|
15 |
+
from agentic_ai_system.logger_config import setup_logging
|
16 |
+
from agentic_ai_system.orchestrator import run, run_backtest
|
17 |
+
from agentic_ai_system.main import load_config
|
18 |
+
|
19 |
+
def main():
    """Run the full demo: data generation, trading, backtesting, statistics."""
    print("🚀 Algorithmic Trading System Demo")
    print("=" * 50)

    # Load configuration first; nothing else can run without it.
    try:
        config = load_config()
        print("✅ Configuration loaded successfully")
    except Exception as e:
        print(f"❌ Error loading configuration: {e}")
        return

    setup_logging(config)
    print("✅ Logging system initialized")

    _demo_synthetic_data(config)
    _demo_trading_workflow(config)
    _demo_backtesting(config)
    _demo_statistics(config)

    print("\n🎉 Demo completed!")
    print("\n📝 Check the logs directory for detailed logs:")
    print("  - logs/trading_system.log")
    print("  - logs/trading.log")
    print("  - logs/performance.log")
    print("  - logs/errors.log")


def _demo_synthetic_data(config):
    """Demo 1: generate sample OHLCV data and the four canned market scenarios."""
    print("\n📊 Demo 1: Synthetic Data Generation")
    print("-" * 30)

    try:
        generator = SyntheticDataGenerator(config)

        print("Generating OHLCV data...")
        ohlcv_data = generator.generate_ohlcv_data(
            symbol='AAPL',
            start_date='2024-01-01',
            end_date='2024-01-02',
            frequency='1H'
        )
        print(f"✅ Generated {len(ohlcv_data)} OHLCV data points")

        print("\nSample OHLCV data:")
        print(ohlcv_data.head())

        print("\nGenerating market scenarios...")
        for scenario in ['normal', 'volatile', 'trending', 'crash']:
            scenario_data = generator.generate_market_scenarios(scenario)
            avg_price = scenario_data['close'].mean()
            print(f"  {scenario.capitalize()} market: {len(scenario_data)} points, avg price: ${avg_price:.2f}")

    except Exception as e:
        print(f"❌ Error in synthetic data generation: {e}")


def _demo_trading_workflow(config):
    """Demo 2: run one end-to-end trading workflow pass and report the result."""
    print("\n🤖 Demo 2: Trading Workflow")
    print("-" * 30)

    try:
        print("Running trading workflow...")
        result = run(config)

        if result['success']:
            print("✅ Trading workflow completed successfully")
            print(f"  Data loaded: {result['data_loaded']}")
            print(f"  Signal generated: {result['signal_generated']}")
            print(f"  Order executed: {result['order_executed']}")
            print(f"  Execution time: {result['execution_time']:.2f} seconds")

            if result['order_executed'] and result['execution_result']:
                exec_result = result['execution_result']
                print(f"  Order details: {exec_result['action']} {exec_result['quantity']} {exec_result['symbol']} @ ${exec_result['price']:.2f}")
        else:
            print("❌ Trading workflow failed")
            print(f"  Errors: {result['errors']}")

    except Exception as e:
        print(f"❌ Error in trading workflow: {e}")


def _demo_backtesting(config):
    """Demo 3: run a one-week backtest and print its summary statistics."""
    print("\n📈 Demo 3: Backtesting")
    print("-" * 30)

    try:
        print("Running backtest...")
        backtest_result = run_backtest(config, '2024-01-01', '2024-01-07')

        if backtest_result['success']:
            print("✅ Backtest completed successfully")
            print(f"  Initial capital: ${backtest_result['initial_capital']:,.2f}")
            print(f"  Final value: ${backtest_result['final_value']:,.2f}")
            print(f"  Total return: {backtest_result['total_return']:.2%}")
            print(f"  Total trades: {backtest_result['total_trades']}")
            print(f"  Positions: {backtest_result['positions']}")
        else:
            print("❌ Backtest failed")
            print(f"  Error: {backtest_result['error']}")

    except Exception as e:
        print(f"❌ Error in backtesting: {e}")


def _demo_statistics(config):
    """Demo 4: print a summary of the loaded configuration sections."""
    print("\n📊 Demo 4: System Statistics")
    print("-" * 30)

    try:
        print("Configuration Summary:")
        print(f"  Trading symbol: {config['trading']['symbol']}")
        print(f"  Timeframe: {config['trading']['timeframe']}")
        print(f"  Capital: ${config['trading']['capital']:,.2f}")
        print(f"  Max position: {config['risk']['max_position']}")
        print(f"  Max drawdown: {config['risk']['max_drawdown']:.1%}")
        print(f"  Broker API: {config['execution']['broker_api']}")

        print("\nSynthetic Data Parameters:")
        print(f"  Base price: ${config['synthetic_data']['base_price']:.2f}")
        print(f"  Volatility: {config['synthetic_data']['volatility']:.3f}")
        print(f"  Trend: {config['synthetic_data']['trend']:.3f}")
        print(f"  Noise level: {config['synthetic_data']['noise_level']:.3f}")

    except Exception as e:
        print(f"❌ Error showing statistics: {e}")


if __name__ == '__main__':
    main()
|
pytest.ini
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[pytest]
|
2 |
+
testpaths = tests
|
3 |
+
python_files = test_*.py
|
4 |
+
python_classes = Test*
|
5 |
+
python_functions = test_*
|
6 |
+
addopts =
|
7 |
+
-v
|
8 |
+
--tb=short
|
9 |
+
--strict-markers
|
10 |
+
--disable-warnings
|
11 |
+
--cov=agentic_ai_system
|
12 |
+
--cov-report=term-missing
|
13 |
+
--cov-report=html:htmlcov
|
14 |
+
--cov-report=xml
|
15 |
+
markers =
|
16 |
+
unit: Unit tests
|
17 |
+
integration: Integration tests
|
18 |
+
slow: Slow running tests
|
19 |
+
synthetic: Tests involving synthetic data generation
|
requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
pyyaml
|
2 |
+
pandas
|
3 |
+
numpy
|
4 |
+
matplotlib
|
5 |
+
seaborn
|
6 |
+
pytest
|
7 |
+
pytest-cov
|
8 |
+
python-dateutil
|
9 |
+
scipy
|
tests/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# Tests package for algorithmic trading system
|
tests/test_data_ingestion.py
ADDED
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import pandas as pd
|
3 |
+
import numpy as np
|
4 |
+
import tempfile
|
5 |
+
import os
|
6 |
+
from unittest.mock import patch, MagicMock
|
7 |
+
from agentic_ai_system.data_ingestion import load_data, validate_data, _load_csv_data, _generate_synthetic_data
|
8 |
+
|
9 |
+
class TestDataIngestion:
|
10 |
+
"""Test cases for data ingestion module"""
|
11 |
+
|
12 |
+
@pytest.fixture
|
13 |
+
def config(self):
|
14 |
+
"""Sample configuration for testing"""
|
15 |
+
return {
|
16 |
+
'data_source': {
|
17 |
+
'type': 'csv',
|
18 |
+
'path': 'data/market_data.csv'
|
19 |
+
},
|
20 |
+
'synthetic_data': {
|
21 |
+
'base_price': 150.0,
|
22 |
+
'volatility': 0.02,
|
23 |
+
'trend': 0.001,
|
24 |
+
'noise_level': 0.005,
|
25 |
+
'data_path': 'data/synthetic_market_data.csv'
|
26 |
+
},
|
27 |
+
'trading': {
|
28 |
+
'symbol': 'AAPL',
|
29 |
+
'timeframe': '1min'
|
30 |
+
}
|
31 |
+
}
|
32 |
+
|
33 |
+
@pytest.fixture
|
34 |
+
def sample_csv_data(self):
|
35 |
+
"""Create sample CSV data for testing"""
|
36 |
+
dates = pd.date_range(start='2024-01-01', periods=100, freq='1min')
|
37 |
+
|
38 |
+
data = []
|
39 |
+
for i, date in enumerate(dates):
|
40 |
+
base_price = 150.0 + (i * 0.1)
|
41 |
+
data.append({
|
42 |
+
'timestamp': date,
|
43 |
+
'open': base_price + np.random.normal(0, 1),
|
44 |
+
'high': base_price + abs(np.random.normal(0, 2)),
|
45 |
+
'low': base_price - abs(np.random.normal(0, 2)),
|
46 |
+
'close': base_price + np.random.normal(0, 1),
|
47 |
+
'volume': np.random.randint(1000, 100000)
|
48 |
+
})
|
49 |
+
|
50 |
+
return pd.DataFrame(data)
|
51 |
+
|
52 |
+
def test_load_data_csv_type(self, config, sample_csv_data):
|
53 |
+
"""Test loading data with CSV type"""
|
54 |
+
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
|
55 |
+
sample_csv_data.to_csv(tmp_file.name, index=False)
|
56 |
+
config['data_source']['path'] = tmp_file.name
|
57 |
+
|
58 |
+
try:
|
59 |
+
result = load_data(config)
|
60 |
+
|
61 |
+
assert isinstance(result, pd.DataFrame)
|
62 |
+
assert len(result) == len(sample_csv_data)
|
63 |
+
assert list(result.columns) == list(sample_csv_data.columns)
|
64 |
+
|
65 |
+
finally:
|
66 |
+
os.unlink(tmp_file.name)
|
67 |
+
|
68 |
+
def test_load_data_synthetic_type(self, config):
|
69 |
+
"""Test loading data with synthetic type"""
|
70 |
+
config['data_source']['type'] = 'synthetic'
|
71 |
+
|
72 |
+
with patch('agentic_ai_system.data_ingestion._generate_synthetic_data') as mock_generate:
|
73 |
+
mock_df = pd.DataFrame({
|
74 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
75 |
+
'open': [150] * 10,
|
76 |
+
'high': [155] * 10,
|
77 |
+
'low': [145] * 10,
|
78 |
+
'close': [152] * 10,
|
79 |
+
'volume': [1000] * 10
|
80 |
+
})
|
81 |
+
mock_generate.return_value = mock_df
|
82 |
+
|
83 |
+
result = load_data(config)
|
84 |
+
|
85 |
+
assert isinstance(result, pd.DataFrame)
|
86 |
+
mock_generate.assert_called_once_with(config)
|
87 |
+
|
88 |
+
def test_load_data_invalid_type(self, config):
|
89 |
+
"""Test loading data with invalid type"""
|
90 |
+
config['data_source']['type'] = 'invalid_type'
|
91 |
+
|
92 |
+
with pytest.raises(ValueError, match="Unsupported data source type"):
|
93 |
+
load_data(config)
|
94 |
+
|
95 |
+
def test_load_csv_data_file_exists(self, config, sample_csv_data):
|
96 |
+
"""Test loading CSV data when file exists"""
|
97 |
+
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
|
98 |
+
sample_csv_data.to_csv(tmp_file.name, index=False)
|
99 |
+
config['data_source']['path'] = tmp_file.name
|
100 |
+
|
101 |
+
try:
|
102 |
+
result = _load_csv_data(config)
|
103 |
+
|
104 |
+
assert isinstance(result, pd.DataFrame)
|
105 |
+
assert len(result) == len(sample_csv_data)
|
106 |
+
assert result['timestamp'].dtype == 'datetime64[ns]'
|
107 |
+
|
108 |
+
finally:
|
109 |
+
os.unlink(tmp_file.name)
|
110 |
+
|
111 |
+
def test_load_csv_data_file_not_exists(self, config):
|
112 |
+
"""Test loading CSV data when file doesn't exist"""
|
113 |
+
config['data_source']['path'] = 'nonexistent_file.csv'
|
114 |
+
|
115 |
+
with patch('agentic_ai_system.data_ingestion._generate_synthetic_data') as mock_generate:
|
116 |
+
mock_df = pd.DataFrame({'test': [1, 2, 3]})
|
117 |
+
mock_generate.return_value = mock_df
|
118 |
+
|
119 |
+
result = _load_csv_data(config)
|
120 |
+
|
121 |
+
assert result is mock_df
|
122 |
+
mock_generate.assert_called_once_with(config)
|
123 |
+
|
124 |
+
def test_load_csv_data_missing_columns(self, config):
|
125 |
+
"""Test loading CSV data with missing columns"""
|
126 |
+
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
|
127 |
+
# Create CSV with missing columns
|
128 |
+
incomplete_data = pd.DataFrame({
|
129 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
130 |
+
'open': [150] * 10,
|
131 |
+
'close': [152] * 10
|
132 |
+
# Missing high, low, volume
|
133 |
+
})
|
134 |
+
incomplete_data.to_csv(tmp_file.name, index=False)
|
135 |
+
config['data_source']['path'] = tmp_file.name
|
136 |
+
|
137 |
+
try:
|
138 |
+
with patch('agentic_ai_system.data_ingestion._generate_synthetic_data') as mock_generate:
|
139 |
+
mock_df = pd.DataFrame({'test': [1, 2, 3]})
|
140 |
+
mock_generate.return_value = mock_df
|
141 |
+
|
142 |
+
result = _load_csv_data(config)
|
143 |
+
|
144 |
+
assert result is mock_df
|
145 |
+
mock_generate.assert_called_once_with(config)
|
146 |
+
|
147 |
+
finally:
|
148 |
+
os.unlink(tmp_file.name)
|
149 |
+
|
150 |
+
def test_generate_synthetic_data(self, config):
|
151 |
+
"""Test synthetic data generation"""
|
152 |
+
with patch('agentic_ai_system.synthetic_data_generator.SyntheticDataGenerator') as mock_generator_class:
|
153 |
+
mock_generator = MagicMock()
|
154 |
+
mock_generator_class.return_value = mock_generator
|
155 |
+
|
156 |
+
mock_df = pd.DataFrame({
|
157 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
158 |
+
'open': [150] * 10,
|
159 |
+
'high': [155] * 10,
|
160 |
+
'low': [145] * 10,
|
161 |
+
'close': [152] * 10,
|
162 |
+
'volume': [1000] * 10
|
163 |
+
})
|
164 |
+
mock_generator.generate_ohlcv_data.return_value = mock_df
|
165 |
+
|
166 |
+
result = _generate_synthetic_data(config)
|
167 |
+
|
168 |
+
assert isinstance(result, pd.DataFrame)
|
169 |
+
mock_generator.generate_ohlcv_data.assert_called_once()
|
170 |
+
mock_generator.save_to_csv.assert_called_once()
|
171 |
+
|
172 |
+
def test_validate_data_valid(self, sample_csv_data):
|
173 |
+
"""Test data validation with valid data"""
|
174 |
+
assert validate_data(sample_csv_data) == True
|
175 |
+
|
176 |
+
def test_validate_data_missing_columns(self):
|
177 |
+
"""Test data validation with missing columns"""
|
178 |
+
invalid_data = pd.DataFrame({
|
179 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
180 |
+
'open': [150] * 10
|
181 |
+
# Missing required columns
|
182 |
+
})
|
183 |
+
|
184 |
+
assert validate_data(invalid_data) == False
|
185 |
+
|
186 |
+
def test_validate_data_negative_prices(self):
|
187 |
+
"""Test data validation with negative prices"""
|
188 |
+
invalid_data = pd.DataFrame({
|
189 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
190 |
+
'open': [150] * 10,
|
191 |
+
'high': [155] * 10,
|
192 |
+
'low': [-145] * 10, # Negative low price
|
193 |
+
'close': [152] * 10,
|
194 |
+
'volume': [1000] * 10
|
195 |
+
})
|
196 |
+
|
197 |
+
assert validate_data(invalid_data) == False
|
198 |
+
|
199 |
+
def test_validate_data_negative_volumes(self):
|
200 |
+
"""Test data validation with negative volumes"""
|
201 |
+
invalid_data = pd.DataFrame({
|
202 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
203 |
+
'open': [150] * 10,
|
204 |
+
'high': [155] * 10,
|
205 |
+
'low': [145] * 10,
|
206 |
+
'close': [152] * 10,
|
207 |
+
'volume': [-1000] * 10 # Negative volume
|
208 |
+
})
|
209 |
+
|
210 |
+
assert validate_data(invalid_data) == False
|
211 |
+
|
212 |
+
def test_validate_data_invalid_ohlc(self):
|
213 |
+
"""Test data validation with invalid OHLC relationships"""
|
214 |
+
invalid_data = pd.DataFrame({
|
215 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
216 |
+
'open': [150] * 10,
|
217 |
+
'high': [145] * 10, # High < Open
|
218 |
+
'low': [145] * 10,
|
219 |
+
'close': [152] * 10,
|
220 |
+
'volume': [1000] * 10
|
221 |
+
})
|
222 |
+
|
223 |
+
assert validate_data(invalid_data) == False
|
224 |
+
|
225 |
+
def test_validate_data_null_values(self):
|
226 |
+
"""Test data validation with null values"""
|
227 |
+
invalid_data = pd.DataFrame({
|
228 |
+
'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
|
229 |
+
'open': [150] * 10,
|
230 |
+
'high': [155] * 10,
|
231 |
+
'low': [145] * 10,
|
232 |
+
'close': [152] * 10,
|
233 |
+
'volume': [1000] * 10
|
234 |
+
})
|
235 |
+
|
236 |
+
# Add null values
|
237 |
+
invalid_data.loc[0, 'open'] = None
|
238 |
+
|
239 |
+
assert validate_data(invalid_data) == False
|
240 |
+
|
241 |
+
def test_validate_data_empty_dataframe(self):
|
242 |
+
"""Test data validation with empty DataFrame"""
|
243 |
+
empty_data = pd.DataFrame()
|
244 |
+
assert validate_data(empty_data) == False
|
245 |
+
|
246 |
+
def test_load_data_error_handling(self, config):
|
247 |
+
"""Test error handling in load_data"""
|
248 |
+
config['data_source']['type'] = 'csv'
|
249 |
+
config['data_source']['path'] = 'nonexistent_file.csv'
|
250 |
+
|
251 |
+
with patch('agentic_ai_system.data_ingestion._generate_synthetic_data', side_effect=Exception("Test error")):
|
252 |
+
with pytest.raises(Exception, match="Test error"):
|
253 |
+
load_data(config)
|
254 |
+
|
255 |
+
def test_csv_data_timestamp_conversion(self, config, sample_csv_data):
|
256 |
+
"""Test timestamp conversion in CSV loading"""
|
257 |
+
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
|
258 |
+
# Convert timestamp to string for CSV
|
259 |
+
sample_csv_data['timestamp'] = sample_csv_data['timestamp'].astype(str)
|
260 |
+
sample_csv_data.to_csv(tmp_file.name, index=False)
|
261 |
+
config['data_source']['path'] = tmp_file.name
|
262 |
+
|
263 |
+
try:
|
264 |
+
result = _load_csv_data(config)
|
265 |
+
|
266 |
+
# Check that timestamp is converted to datetime
|
267 |
+
assert result['timestamp'].dtype == 'datetime64[ns]'
|
268 |
+
|
269 |
+
finally:
|
270 |
+
os.unlink(tmp_file.name)
|
271 |
+
|
272 |
+
def test_synthetic_data_directory_creation(self, config):
|
273 |
+
"""Test that synthetic data directory is created if it doesn't exist"""
|
274 |
+
with patch('os.makedirs') as mock_makedirs:
|
275 |
+
with patch('agentic_ai_system.synthetic_data_generator.SyntheticDataGenerator') as mock_generator_class:
|
276 |
+
mock_generator = MagicMock()
|
277 |
+
mock_generator_class.return_value = mock_generator
|
278 |
+
|
279 |
+
mock_df = pd.DataFrame({'test': [1, 2, 3]})
|
280 |
+
mock_generator.generate_ohlcv_data.return_value = mock_df
|
281 |
+
|
282 |
+
_generate_synthetic_data(config)
|
283 |
+
|
284 |
+
# Check that makedirs was called
|
285 |
+
mock_makedirs.assert_called_once()
|
286 |
+
|
287 |
+
def test_data_validation_edge_cases(self):
|
288 |
+
"""Test data validation with edge cases"""
|
289 |
+
# Test with single row
|
290 |
+
single_row_data = pd.DataFrame({
|
291 |
+
'timestamp': [pd.Timestamp('2024-01-01')],
|
292 |
+
'open': [150],
|
293 |
+
'high': [155],
|
294 |
+
'low': [145],
|
295 |
+
'close': [152],
|
296 |
+
'volume': [1000]
|
297 |
+
})
|
298 |
+
|
299 |
+
assert validate_data(single_row_data) == True
|
300 |
+
|
301 |
+
# Test with very large numbers
|
302 |
+
large_data = pd.DataFrame({
|
303 |
+
'timestamp': pd.date_range('2024-01-01', periods=5, freq='1min'),
|
304 |
+
'open': [1e6] * 5,
|
305 |
+
'high': [1e6 + 100] * 5,
|
306 |
+
'low': [1e6 - 100] * 5,
|
307 |
+
'close': [1e6 + 50] * 5,
|
308 |
+
'volume': [1e9] * 5
|
309 |
+
})
|
310 |
+
|
311 |
+
assert validate_data(large_data) == True
|
tests/test_execution_agent.py
ADDED
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import time
|
3 |
+
from unittest.mock import patch, MagicMock
|
4 |
+
from agentic_ai_system.execution_agent import ExecutionAgent
|
5 |
+
|
6 |
+
class TestExecutionAgent:
    """Unit tests for ExecutionAgent: signal validation, simulated order
    execution (delay, slippage, success rate), commissions and statistics.

    Fixes vs. previous revision: removed an unused ``start_time`` variable in
    ``test_execution_delay`` and replaced ``== True`` / ``== False``
    comparisons with plain truthiness asserts (PEP 8 / flake8 E712).
    """

    @pytest.fixture
    def config(self):
        """Sample configuration for testing"""
        return {
            'execution': {
                'broker_api': 'paper',
                'order_size': 10,
                'delay_ms': 50,
                'success_rate': 0.95
            },
            'trading': {
                'symbol': 'AAPL',
                'timeframe': '1min',
                'capital': 100000
            },
            'risk': {
                'max_position': 100,
                'max_drawdown': 0.05
            }
        }

    @pytest.fixture
    def execution_agent(self, config):
        """Create an ExecutionAgent instance"""
        return ExecutionAgent(config)

    @pytest.fixture
    def valid_signal(self):
        """Create a valid trading signal"""
        return {
            'action': 'buy',
            'symbol': 'AAPL',
            'quantity': 10,
            'price': 150.0,
            'confidence': 0.8
        }

    def test_initialization(self, execution_agent, config):
        """Agent attributes mirror the execution section of the config."""
        assert execution_agent.broker_api == config['execution']['broker_api']
        assert execution_agent.order_size == config['execution']['order_size']
        assert execution_agent.execution_delay == config['execution']['delay_ms']
        assert execution_agent.success_rate == config['execution']['success_rate']

    def test_act_with_valid_signal(self, execution_agent, valid_signal):
        """A valid signal produces a fully populated execution report."""
        result = execution_agent.act(valid_signal)

        # Check result structure
        assert isinstance(result, dict)
        for key in ('order_id', 'status', 'action', 'symbol', 'quantity',
                    'price', 'execution_time', 'commission', 'total_value',
                    'success', 'error'):
            assert key in result

        # Echoed fields match the input signal; numeric fields are sane.
        assert result['action'] == valid_signal['action']
        assert result['symbol'] == valid_signal['symbol']
        assert result['quantity'] == valid_signal['quantity']
        assert result['price'] > 0
        assert result['execution_time'] > 0
        assert result['commission'] >= 0
        assert result['total_value'] >= 0

    def test_act_with_hold_signal(self, execution_agent):
        """A hold signal is passed through and always succeeds."""
        hold_signal = {
            'action': 'hold',
            'symbol': 'AAPL',
            'quantity': 0,
            'price': 0,
            'confidence': 0.0
        }

        result = execution_agent.act(hold_signal)

        assert result['action'] == 'hold'
        assert result['quantity'] == 0
        assert result['success']  # Hold should always succeed

    def test_validate_signal_valid(self, execution_agent, valid_signal):
        """Test signal validation with valid signal"""
        assert execution_agent._validate_signal(valid_signal)

    def test_validate_signal_missing_fields(self, execution_agent):
        """Test signal validation with missing fields"""
        invalid_signal = {'action': 'buy'}  # Missing symbol and quantity

        assert not execution_agent._validate_signal(invalid_signal)

    def test_validate_signal_invalid_action(self, execution_agent):
        """Test signal validation with invalid action"""
        invalid_signal = {
            'action': 'invalid_action',
            'symbol': 'AAPL',
            'quantity': 10
        }

        assert not execution_agent._validate_signal(invalid_signal)

    def test_validate_signal_invalid_quantity(self, execution_agent):
        """Test signal validation with invalid quantity"""
        invalid_signal = {
            'action': 'buy',
            'symbol': 'AAPL',
            'quantity': -5  # Negative quantity
        }

        assert not execution_agent._validate_signal(invalid_signal)

    def test_validate_signal_invalid_symbol(self, execution_agent):
        """Test signal validation with invalid symbol"""
        invalid_signal = {
            'action': 'buy',
            'symbol': '',  # Empty symbol
            'quantity': 10
        }

        assert not execution_agent._validate_signal(invalid_signal)

    def test_calculate_commission(self, execution_agent):
        """Commission is base $1 plus $0.01 per share; hold pays nothing."""
        # Test buy order
        buy_signal = {'action': 'buy', 'quantity': 10}
        commission_buy = execution_agent._calculate_commission(buy_signal)

        # Base commission ($1) + per share commission ($0.01 * 10) = $1.10
        expected_commission = 1.0 + (10 * 0.01)
        assert commission_buy == expected_commission

        # Test sell order
        sell_signal = {'action': 'sell', 'quantity': 5}
        commission_sell = execution_agent._calculate_commission(sell_signal)

        expected_commission = 1.0 + (5 * 0.01)
        assert commission_sell == expected_commission

        # Test hold order (no commission)
        hold_signal = {'action': 'hold', 'quantity': 0}
        commission_hold = execution_agent._calculate_commission(hold_signal)

        assert commission_hold == 0.0

    def test_generate_order_id(self, execution_agent):
        """Order IDs look like 'ORD_' followed by 8 hex characters."""
        order_id = execution_agent._generate_order_id()

        assert isinstance(order_id, str)
        assert order_id.startswith('ORD_')
        assert len(order_id) == 12  # 'ORD_' + 8 hex characters

    def test_simulate_successful_execution(self, execution_agent, valid_signal):
        """Test successful execution simulation"""
        result = execution_agent._simulate_successful_execution(valid_signal)

        assert result['status'] == 'filled'
        assert result['success']
        assert result['error'] is None
        assert result['order_id'] is not None
        assert result['price'] > 0
        assert result['total_value'] > 0

    def test_simulate_failed_execution(self, execution_agent, valid_signal):
        """Test failed execution simulation"""
        result = execution_agent._simulate_failed_execution(valid_signal)

        assert result['status'] == 'rejected'
        assert not result['success']
        assert result['error'] is not None
        assert result['order_id'] is None
        assert result['price'] == 0
        assert result['total_value'] == 0

    def test_generate_execution_result(self, execution_agent, valid_signal):
        """Test execution result generation"""
        # Test successful result
        success_result = execution_agent._generate_execution_result(valid_signal, True)

        assert success_result['status'] == 'filled'
        assert success_result['success']
        assert success_result['order_id'] is not None

        # Test failed result
        failed_result = execution_agent._generate_execution_result(valid_signal, False, "Test error")

        assert failed_result['status'] == 'rejected'
        assert not failed_result['success']
        assert failed_result['error'] == "Test error"
        assert failed_result['order_id'] is None

    def test_execution_delay(self, execution_agent, valid_signal):
        """time.sleep is called once with the configured delay (ms -> s)."""
        # NOTE: previous revision recorded an unused start_time here; the
        # mocked sleep makes wall-clock measurement unnecessary.
        with patch('time.sleep') as mock_sleep:
            execution_agent._execute_order(valid_signal)
            mock_sleep.assert_called_once()

            # Check that sleep was called with the correct delay
            call_args = mock_sleep.call_args[0][0]
            expected_delay = execution_agent.execution_delay / 1000.0
            assert abs(call_args - expected_delay) < 0.001

    def test_success_rate_simulation(self, execution_agent, valid_signal):
        """Orders fail when random() >= success_rate and succeed otherwise."""
        # Set success rate to 0.0 (should always fail)
        execution_agent.success_rate = 0.0

        with patch('random.random', return_value=0.5):  # Always above 0.0
            result = execution_agent._execute_order(valid_signal)
            assert not result['success']

        # Set success rate to 1.0 (should always succeed)
        execution_agent.success_rate = 1.0

        with patch('random.random', return_value=0.5):  # Always below 1.0
            result = execution_agent._execute_order(valid_signal)
            assert result['success']

    def test_error_handling_in_execution(self, execution_agent, valid_signal):
        """Exceptions raised during simulation become failed results."""
        with patch.object(execution_agent, '_simulate_successful_execution',
                          side_effect=Exception("Test error")):
            result = execution_agent._execute_order(valid_signal)

            assert not result['success']
            assert "Test error" in result['error']

    def test_get_execution_statistics(self, execution_agent):
        """A fresh agent reports all-zero execution statistics."""
        stats = execution_agent.get_execution_statistics()

        expected_keys = [
            'total_orders', 'successful_orders', 'failed_orders',
            'success_rate', 'average_execution_time', 'total_commission'
        ]

        for key in expected_keys:
            assert key in stats

        # Check default values
        assert stats['total_orders'] == 0
        assert stats['successful_orders'] == 0
        assert stats['failed_orders'] == 0
        assert stats['success_rate'] == 0.0
        assert stats['average_execution_time'] == 0.0
        assert stats['total_commission'] == 0.0

    def test_price_slippage_simulation(self, execution_agent, valid_signal):
        """Executed price stays within 0.2% of the signal price."""
        # Mock random.uniform to return a known slippage value
        with patch('random.uniform', return_value=0.001):  # 0.1% slippage
            result = execution_agent._simulate_successful_execution(valid_signal)

            # Price should be slightly different from original
            original_price = valid_signal['price']
            executed_price = result['price']

            # Should be within 0.2% of original price
            price_diff = abs(executed_price - original_price) / original_price
            assert price_diff <= 0.002

    def test_commission_calculation_edge_cases(self, execution_agent):
        """Test commission calculation edge cases"""
        # Test with zero quantity
        zero_signal = {'action': 'buy', 'quantity': 0}
        commission_zero = execution_agent._calculate_commission(zero_signal)
        assert commission_zero == 1.0  # Only base commission

        # Test with very large quantity
        large_signal = {'action': 'sell', 'quantity': 10000}
        commission_large = execution_agent._calculate_commission(large_signal)
        expected_large = 1.0 + (10000 * 0.01)
        assert commission_large == expected_large

    def test_order_id_uniqueness(self, execution_agent):
        """100 consecutively generated order IDs are all distinct."""
        order_ids = {execution_agent._generate_order_id() for _ in range(100)}

        # All order IDs should be unique
        assert len(order_ids) == 100
tests/test_integration.py
ADDED
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import pandas as pd
|
3 |
+
import numpy as np
|
4 |
+
import tempfile
|
5 |
+
import os
|
6 |
+
from unittest.mock import patch, MagicMock
|
7 |
+
from agentic_ai_system.orchestrator import run, run_backtest, run_live_trading
|
8 |
+
from agentic_ai_system.main import load_config
|
9 |
+
|
10 |
+
class TestIntegration:
    """End-to-end tests: run(), run_backtest() and run_live_trading().

    Fix vs. previous revision: tests cloned the ``config`` fixture with
    ``dict.copy()`` (a *shallow* copy) and then mutated nested sections such
    as ``test_config['trading']['symbol']`` — silently mutating the shared
    fixture dict. ``_clone_config`` performs a deep copy instead.
    """

    @staticmethod
    def _clone_config(cfg):
        """Deep-copy a nested config dict so nested mutations stay local."""
        import copy
        return copy.deepcopy(cfg)

    @pytest.fixture
    def config(self):
        """Sample configuration for integration testing"""
        return {
            'data_source': {
                'type': 'synthetic',
                'path': 'data/market_data.csv'
            },
            'trading': {
                'symbol': 'AAPL',
                'timeframe': '1min',
                'capital': 100000
            },
            'risk': {
                'max_position': 100,
                'max_drawdown': 0.05
            },
            'execution': {
                'broker_api': 'paper',
                'order_size': 10,
                'delay_ms': 10,  # Fast for testing
                'success_rate': 1.0  # Always succeed for testing
            },
            'synthetic_data': {
                'base_price': 150.0,
                'volatility': 0.02,
                'trend': 0.001,
                'noise_level': 0.005,
                'data_path': 'data/synthetic_market_data.csv'
            },
            'logging': {
                'log_level': 'INFO',
                'log_dir': 'logs',
                'enable_console': True,
                'enable_file': True
            }
        }

    def test_full_workflow(self, config):
        """Test the complete trading workflow"""
        result = run(config)

        # Check result structure
        assert isinstance(result, dict)
        for key in ('success', 'data_loaded', 'signal_generated',
                    'order_executed', 'execution_time', 'errors'):
            assert key in result

        # Data was loaded and a signal was generated.
        assert result['data_loaded']
        assert result['signal_generated']

        # Check execution time is reasonable
        assert result['execution_time'] > 0
        assert result['execution_time'] < 60  # Should complete within 60 seconds

    def test_backtest_workflow(self, config):
        """Test the backtest workflow"""
        result = run_backtest(config, '2024-01-01', '2024-01-02')

        # Check result structure
        assert isinstance(result, dict)
        assert 'success' in result

        if result['success']:
            for key in ('start_date', 'end_date', 'initial_capital',
                        'final_value', 'total_return', 'total_trades',
                        'trades', 'positions'):
                assert key in result

            # Check that backtest completed
            assert result['initial_capital'] == config['trading']['capital']
            assert result['final_value'] >= 0
            assert isinstance(result['total_return'], float)
            assert result['total_trades'] >= 0
            assert isinstance(result['trades'], list)
            assert isinstance(result['positions'], dict)

    def test_live_trading_workflow(self, config):
        """Test the live trading workflow (short duration)"""
        # Test with very short duration to avoid long test times
        result = run_live_trading(config, duration_minutes=1)

        # Check result structure
        assert isinstance(result, dict)
        assert 'success' in result

        if result['success']:
            for key in ('duration_minutes', 'total_trades', 'trades',
                        'start_time', 'end_time'):
                assert key in result

            # Check that live trading completed
            assert result['duration_minutes'] == 1
            assert result['total_trades'] >= 0
            assert isinstance(result['trades'], list)

    def test_workflow_with_csv_data(self, config):
        """Test workflow with CSV data source"""
        # Create temporary CSV file
        with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
            # Generate sample data
            dates = pd.date_range(start='2024-01-01', periods=100, freq='1min')
            data = []
            for i, date in enumerate(dates):
                base_price = 150.0 + (i * 0.1)
                data.append({
                    'timestamp': date,
                    'open': base_price + np.random.normal(0, 1),
                    'high': base_price + abs(np.random.normal(0, 2)),
                    'low': base_price - abs(np.random.normal(0, 2)),
                    'close': base_price + np.random.normal(0, 1),
                    'volume': np.random.randint(1000, 100000)
                })

            df = pd.DataFrame(data)
            df.to_csv(tmp_file.name, index=False)
            config['data_source']['type'] = 'csv'
            config['data_source']['path'] = tmp_file.name

        try:
            result = run(config)

            assert result['success']
            assert result['data_loaded']
            assert result['signal_generated']

        finally:
            os.unlink(tmp_file.name)

    def test_workflow_error_handling(self, config):
        """Test workflow error handling"""
        # Deep copy so mutating the nested data_source dict stays local.
        invalid_config = self._clone_config(config)
        invalid_config['data_source']['type'] = 'invalid_type'

        result = run(invalid_config)

        assert not result['success']
        assert len(result['errors']) > 0

    def test_backtest_with_different_periods(self, config):
        """Test backtest with different time periods"""
        # Test short period
        short_result = run_backtest(config, '2024-01-01', '2024-01-01')
        assert isinstance(short_result, dict)

        # Test longer period
        long_result = run_backtest(config, '2024-01-01', '2024-01-07')
        assert isinstance(long_result, dict)

        # Both should be valid results (success or failure)
        assert 'success' in short_result
        assert 'success' in long_result

    def test_system_with_different_symbols(self, config):
        """Test system with different trading symbols"""
        symbols = ['AAPL', 'GOOGL', 'MSFT', 'TSLA']

        for symbol in symbols:
            test_config = self._clone_config(config)
            test_config['trading']['symbol'] = symbol

            result = run(test_config)

            assert result['success']
            assert result['data_loaded']
            assert result['signal_generated']

    def test_system_with_different_capital_amounts(self, config):
        """Test system with different capital amounts"""
        capital_amounts = [10000, 50000, 100000, 500000]

        for capital in capital_amounts:
            test_config = self._clone_config(config)
            test_config['trading']['capital'] = capital

            result = run(test_config)

            assert result['success']
            assert result['data_loaded']
            assert result['signal_generated']

    def test_execution_failure_simulation(self, config):
        """Test system behavior with execution failures"""
        # Set success rate to 0 to simulate all failures
        test_config = self._clone_config(config)
        test_config['execution']['success_rate'] = 0.0

        result = run(test_config)

        # System should still complete workflow
        assert result['success']
        assert result['data_loaded']
        assert result['signal_generated']

        # But order execution should fail
        if result['order_executed']:
            assert not result['execution_result']['success']

    def test_data_validation_integration(self, config):
        """Test data validation integration"""
        # Create invalid data
        with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
            invalid_data = pd.DataFrame({
                'timestamp': pd.date_range('2024-01-01', periods=10, freq='1min'),
                'open': [150] * 10,
                'high': [145] * 10,  # Invalid: high < open
                'low': [145] * 10,
                'close': [152] * 10,
                'volume': [1000] * 10
            })
            invalid_data.to_csv(tmp_file.name, index=False)
            config['data_source']['type'] = 'csv'
            config['data_source']['path'] = tmp_file.name

        try:
            result = run(config)

            # System should still work (fallback to synthetic data)
            assert result['success']

        finally:
            os.unlink(tmp_file.name)

    def test_performance_metrics(self, config):
        """Test that performance metrics are calculated correctly"""
        result = run_backtest(config, '2024-01-01', '2024-01-03')

        if result['success']:
            # Check that return is calculated correctly
            initial_capital = result['initial_capital']
            final_value = result['final_value']
            calculated_return = (final_value - initial_capital) / initial_capital

            assert abs(result['total_return'] - calculated_return) < 0.001

            # Check that trade count is reasonable
            assert result['total_trades'] >= 0

    def test_config_loading(self):
        """Test configuration loading functionality"""
        # Test with valid config
        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp_file:
            config_content = """
data_source:
  type: 'synthetic'
  path: 'data/market_data.csv'

trading:
  symbol: 'AAPL'
  timeframe: '1min'
  capital: 100000

risk:
  max_position: 100
  max_drawdown: 0.05

execution:
  broker_api: 'paper'
  order_size: 10
"""
            tmp_file.write(config_content)
            tmp_file.flush()

        try:
            config = load_config(tmp_file.name)

            assert config['data_source']['type'] == 'synthetic'
            assert config['trading']['symbol'] == 'AAPL'
            assert config['trading']['capital'] == 100000

        finally:
            os.unlink(tmp_file.name)

    def test_system_scalability(self, config):
        """Test system scalability with larger datasets"""
        # Test with larger synthetic dataset
        test_config = self._clone_config(config)
        test_config['synthetic_data']['base_price'] = 200.0
        test_config['synthetic_data']['volatility'] = 0.03

        result = run(test_config)

        assert result['success']
        assert result['data_loaded']
        assert result['signal_generated']

        # Check execution time is reasonable
        assert result['execution_time'] < 30  # Should complete within 30 seconds
tests/test_strategy_agent.py
ADDED
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import pandas as pd
|
3 |
+
import numpy as np
|
4 |
+
from datetime import datetime, timedelta
|
5 |
+
from agentic_ai_system.strategy_agent import StrategyAgent
|
6 |
+
|
7 |
+
class TestStrategyAgent:
|
8 |
+
"""Test cases for StrategyAgent"""
|
9 |
+
|
10 |
+
@pytest.fixture
|
11 |
+
def config(self):
|
12 |
+
"""Sample configuration for testing"""
|
13 |
+
return {
|
14 |
+
'trading': {
|
15 |
+
'symbol': 'AAPL',
|
16 |
+
'timeframe': '1min',
|
17 |
+
'capital': 100000
|
18 |
+
},
|
19 |
+
'risk': {
|
20 |
+
'max_position': 100,
|
21 |
+
'max_drawdown': 0.05
|
22 |
+
},
|
23 |
+
'execution': {
|
24 |
+
'broker_api': 'paper',
|
25 |
+
'order_size': 10
|
26 |
+
}
|
27 |
+
}
|
28 |
+
|
29 |
+
@pytest.fixture
|
30 |
+
def strategy_agent(self, config):
|
31 |
+
"""Create a StrategyAgent instance"""
|
32 |
+
return StrategyAgent(config)
|
33 |
+
|
34 |
+
@pytest.fixture
|
35 |
+
def sample_data(self):
|
36 |
+
"""Create sample market data for testing"""
|
37 |
+
dates = pd.date_range(start='2024-01-01', periods=100, freq='1min')
|
38 |
+
|
39 |
+
# Generate realistic price data
|
40 |
+
base_price = 150.0
|
41 |
+
prices = []
|
42 |
+
for i in range(100):
|
43 |
+
# Add some trend and noise
|
44 |
+
price = base_price + (i * 0.1) + np.random.normal(0, 2)
|
45 |
+
prices.append(max(price, 1)) # Ensure positive prices
|
46 |
+
|
47 |
+
data = []
|
48 |
+
for i, (date, close_price) in enumerate(zip(dates, prices)):
|
49 |
+
# Generate OHLC from close price
|
50 |
+
noise = np.random.normal(0, 1)
|
51 |
+
open_price = close_price + noise
|
52 |
+
high_price = max(open_price, close_price) + abs(np.random.normal(0, 2))
|
53 |
+
low_price = min(open_price, close_price) - abs(np.random.normal(0, 2))
|
54 |
+
volume = np.random.randint(1000, 100000)
|
55 |
+
|
56 |
+
data.append({
|
57 |
+
'timestamp': date,
|
58 |
+
'open': round(open_price, 2),
|
59 |
+
'high': round(high_price, 2),
|
60 |
+
'low': round(low_price, 2),
|
61 |
+
'close': round(close_price, 2),
|
62 |
+
'volume': volume
|
63 |
+
})
|
64 |
+
|
65 |
+
return pd.DataFrame(data)
|
66 |
+
|
67 |
+
def test_initialization(self, strategy_agent, config):
|
68 |
+
"""Test agent initialization"""
|
69 |
+
assert strategy_agent.symbol == config['trading']['symbol']
|
70 |
+
assert strategy_agent.capital == config['trading']['capital']
|
71 |
+
assert strategy_agent.max_position == config['risk']['max_position']
|
72 |
+
assert strategy_agent.max_drawdown == config['risk']['max_drawdown']
|
73 |
+
|
74 |
+
def test_act_with_valid_data(self, strategy_agent, sample_data):
|
75 |
+
"""Test signal generation with valid data"""
|
76 |
+
signal = strategy_agent.act(sample_data)
|
77 |
+
|
78 |
+
# Check signal structure
|
79 |
+
assert isinstance(signal, dict)
|
80 |
+
assert 'action' in signal
|
81 |
+
assert 'symbol' in signal
|
82 |
+
assert 'quantity' in signal
|
83 |
+
assert 'price' in signal
|
84 |
+
assert 'confidence' in signal
|
85 |
+
|
86 |
+
# Check action values
|
87 |
+
assert signal['action'] in ['buy', 'sell', 'hold']
|
88 |
+
assert signal['symbol'] == strategy_agent.symbol
|
89 |
+
assert signal['quantity'] >= 0
|
90 |
+
assert signal['price'] > 0
|
91 |
+
assert 0 <= signal['confidence'] <= 1
|
92 |
+
|
93 |
+
def test_act_with_empty_data(self, strategy_agent):
|
94 |
+
"""Test signal generation with empty data"""
|
95 |
+
empty_data = pd.DataFrame()
|
96 |
+
signal = strategy_agent.act(empty_data)
|
97 |
+
|
98 |
+
assert signal['action'] == 'hold'
|
99 |
+
assert signal['quantity'] == 0
|
100 |
+
assert signal['confidence'] == 0.0
|
101 |
+
|
102 |
+
def test_calculate_indicators(self, strategy_agent, sample_data):
|
103 |
+
"""Test technical indicator calculations"""
|
104 |
+
indicators = strategy_agent._calculate_indicators(sample_data)
|
105 |
+
|
106 |
+
# Check that indicators are calculated
|
107 |
+
expected_indicators = ['sma_20', 'sma_50', 'rsi', 'bb_upper', 'bb_lower', 'macd', 'macd_signal']
|
108 |
+
for indicator in expected_indicators:
|
109 |
+
assert indicator in indicators
|
110 |
+
|
111 |
+
# Check that indicators have reasonable values
|
112 |
+
if len(indicators['sma_20']) > 0:
|
113 |
+
assert indicators['sma_20'][-1] > 0
|
114 |
+
|
115 |
+
if len(indicators['rsi']) > 0:
|
116 |
+
rsi_value = indicators['rsi'][-1]
|
117 |
+
assert 0 <= rsi_value <= 100
|
118 |
+
|
119 |
+
def test_calculate_sma(self, strategy_agent):
|
120 |
+
"""Test Simple Moving Average calculation"""
|
121 |
+
prices = np.array([100, 101, 102, 103, 104, 105, 106, 107, 108, 109])
|
122 |
+
|
123 |
+
# Test SMA with window 3
|
124 |
+
sma = strategy_agent._calculate_sma(prices, 3)
|
125 |
+
expected_sma = np.array([101, 102, 103, 104, 105, 106, 107, 108])
|
126 |
+
|
127 |
+
np.testing.assert_array_almost_equal(sma, expected_sma, decimal=2)
|
128 |
+
|
129 |
+
# Test with insufficient data
|
130 |
+
short_prices = np.array([100, 101])
|
131 |
+
sma_short = strategy_agent._calculate_sma(short_prices, 3)
|
132 |
+
assert len(sma_short) == 0
|
133 |
+
|
134 |
+
def test_calculate_rsi(self, strategy_agent):
|
135 |
+
"""Test RSI calculation"""
|
136 |
+
# Create price data with known pattern
|
137 |
+
prices = np.array([100, 101, 102, 101, 100, 99, 98, 99, 100, 101])
|
138 |
+
|
139 |
+
rsi = strategy_agent._calculate_rsi(prices, window=3)
|
140 |
+
|
141 |
+
# RSI should be between 0 and 100
|
142 |
+
if len(rsi) > 0:
|
143 |
+
assert 0 <= rsi[-1] <= 100
|
144 |
+
|
145 |
+
def test_calculate_bollinger_bands(self, strategy_agent):
|
146 |
+
"""Test Bollinger Bands calculation"""
|
147 |
+
prices = np.array([100, 101, 102, 103, 104, 105, 106, 107, 108, 109])
|
148 |
+
|
149 |
+
bb_upper, bb_lower = strategy_agent._calculate_bollinger_bands(prices, window=5)
|
150 |
+
|
151 |
+
if len(bb_upper) > 0 and len(bb_lower) > 0:
|
152 |
+
# Upper band should be above lower band
|
153 |
+
assert bb_upper[-1] > bb_lower[-1]
|
154 |
+
|
155 |
+
def test_calculate_position_size(self, strategy_agent):
|
156 |
+
"""Test position size calculation"""
|
157 |
+
price = 150.0
|
158 |
+
|
159 |
+
# Test normal case
|
160 |
+
quantity = strategy_agent._calculate_position_size(price)
|
161 |
+
expected_quantity = int((strategy_agent.capital * 0.1) / price)
|
162 |
+
expected_quantity = min(expected_quantity, strategy_agent.max_position)
|
163 |
+
|
164 |
+
assert quantity == expected_quantity
|
165 |
+
assert quantity >= 1
|
166 |
+
|
167 |
+
# Test with very high price
|
168 |
+
high_price = 10000.0
|
169 |
+
quantity_high = strategy_agent._calculate_position_size(high_price)
|
170 |
+
assert quantity_high == 1 # Minimum quantity
|
171 |
+
|
172 |
+
def test_generate_no_action_signal(self, strategy_agent):
|
173 |
+
"""Test no-action signal generation"""
|
174 |
+
signal = strategy_agent._generate_no_action_signal()
|
175 |
+
|
176 |
+
assert signal['action'] == 'hold'
|
177 |
+
assert signal['quantity'] == 0
|
178 |
+
assert signal['price'] == 0
|
179 |
+
assert signal['confidence'] == 0.0
|
180 |
+
assert signal['symbol'] == strategy_agent.symbol
|
181 |
+
|
182 |
+
def test_signal_generation_logic(self, strategy_agent, sample_data):
    """act() yields a well-formed signal for both rising and falling markets."""
    # Shift all closes up 10% to simulate a rally.
    rally = sample_data.copy()
    rally['close'] = rally['close'] * 1.1

    rally_signal = strategy_agent.act(rally)

    # Shift all closes down 10% to simulate a sell-off.
    selloff = sample_data.copy()
    selloff['close'] = selloff['close'] * 0.9

    selloff_signal = strategy_agent.act(selloff)

    # Regardless of direction, the action must be a recognised verb.
    assert rally_signal['action'] in ['buy', 'sell', 'hold']
    assert selloff_signal['action'] in ['buy', 'sell', 'hold']
def test_error_handling(self, strategy_agent):
    """Malformed market data must degrade to a hold signal, never raise."""
    bogus_frame = pd.DataFrame({'invalid_column': [1, 2, 3]})

    # The agent is expected to swallow the bad schema and return 'hold'.
    result = strategy_agent.act(bogus_frame)
    assert result['action'] == 'hold'
def test_technical_indicators_edge_cases(self, strategy_agent):
    """RSI copes with flat, strictly rising and strictly falling price paths."""
    flat_prices = np.ones(50) * 100
    rising_prices = np.arange(100, 150)
    falling_prices = np.arange(150, 100, -1)

    rsi_flat = strategy_agent._calculate_rsi(flat_prices)
    rsi_rising = strategy_agent._calculate_rsi(rising_prices)
    rsi_falling = strategy_agent._calculate_rsi(falling_prices)

    # Every edge case must yield an ndarray (possibly empty), never crash.
    assert isinstance(rsi_flat, np.ndarray)
    assert isinstance(rsi_rising, np.ndarray)
    assert isinstance(rsi_falling, np.ndarray)
def test_macd_calculation(self, strategy_agent):
    """MACD and signal lines come back as ndarrays with values once data suffices.

    Uses a locally seeded RNG so the noisy price path is reproducible.
    The original drew from the global unseeded ``np.random`` state, which
    made the fixture (and any downstream length checks) non-deterministic
    from run to run and could perturb other tests sharing that state.
    """
    rng = np.random.default_rng(42)
    # Gentle upward drift plus unit-variance noise over 50 bars.
    prices = np.array([100 + i * 0.1 + rng.normal(0, 1) for i in range(50)])

    macd_line, signal_line = strategy_agent._calculate_macd(prices)

    # Both outputs must be numpy arrays.
    assert isinstance(macd_line, np.ndarray)
    assert isinstance(signal_line, np.ndarray)

    # Standard MACD needs 26 periods; its signal line needs 9 MACD points.
    if len(prices) >= 26:
        assert len(macd_line) > 0
        if len(macd_line) >= 9:
            assert len(signal_line) > 0
def test_ema_calculation(self, strategy_agent):
    """EMA over a rising positive series returns a positive-valued ndarray."""
    series = np.array([100, 101, 102, 103, 104, 105, 106, 107, 108, 109])

    ema_values = strategy_agent._calculate_ema(series, window=5)

    assert isinstance(ema_values, np.ndarray)
    if len(ema_values) > 0:
        # A weighted average of positive prices must itself be positive.
        assert ema_values[-1] > 0
tests/test_synthetic_data_generator.py
ADDED
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import pandas as pd
|
3 |
+
import numpy as np
|
4 |
+
from datetime import datetime
|
5 |
+
import tempfile
|
6 |
+
import os
|
7 |
+
from agentic_ai_system.synthetic_data_generator import SyntheticDataGenerator
|
8 |
+
|
9 |
+
class TestSyntheticDataGenerator:
    """Unit tests covering the SyntheticDataGenerator component."""

    @pytest.fixture
    def config(self):
        """Minimal configuration dict the generator expects."""
        return {
            'synthetic_data': {
                'base_price': 100.0,
                'volatility': 0.02,
                'trend': 0.001,
                'noise_level': 0.005
            },
            'trading': {
                'symbol': 'AAPL',
                'timeframe': '1min'
            }
        }

    @pytest.fixture
    def generator(self, config):
        """Generator instance built from the sample config."""
        return SyntheticDataGenerator(config)

    def test_initialization(self, generator, config):
        """The constructor copies every synthetic_data setting onto the instance."""
        synth_cfg = config['synthetic_data']
        assert generator.base_price == synth_cfg['base_price']
        assert generator.volatility == synth_cfg['volatility']
        assert generator.trend == synth_cfg['trend']
        assert generator.noise_level == synth_cfg['noise_level']

    def test_generate_ohlcv_data(self, generator):
        """OHLCV frames carry the right schema, dtypes and per-bar invariants."""
        frame = generator.generate_ohlcv_data(
            symbol='AAPL',
            start_date='2024-01-01',
            end_date='2024-01-02',
            frequency='1min'
        )

        assert isinstance(frame, pd.DataFrame)
        assert len(frame) > 0

        # Schema: every expected column must be present.
        for col in ['timestamp', 'symbol', 'open', 'high', 'low', 'close', 'volume']:
            assert col in frame.columns

        # Dtypes: timestamps parsed, prices floating point, volume integral.
        assert frame['timestamp'].dtype == 'datetime64[ns]'
        assert frame['symbol'].dtype == 'object'
        for price_col in ['open', 'high', 'low', 'close']:
            assert frame[price_col].dtype in ['float64', 'float32']
        assert frame['volume'].dtype in ['int64', 'int32']

        # Bar invariants: high is the max, low is the min, values positive.
        assert (frame['high'] >= frame['low']).all()
        assert (frame['high'] >= frame['open']).all()
        assert (frame['high'] >= frame['close']).all()
        assert (frame['low'] <= frame['open']).all()
        assert (frame['low'] <= frame['close']).all()
        assert (frame['volume'] >= 0).all()
        assert (frame['open'] > 0).all()
        assert (frame['close'] > 0).all()

    def test_generate_tick_data(self, generator):
        """Tick frames carry positive prices and non-negative volume."""
        ticks = generator.generate_tick_data(
            symbol='AAPL',
            duration_minutes=10,
            tick_interval_ms=1000
        )

        assert isinstance(ticks, pd.DataFrame)
        assert len(ticks) > 0

        for col in ['timestamp', 'symbol', 'price', 'volume']:
            assert col in ticks.columns

        assert (ticks['price'] > 0).all()
        assert (ticks['volume'] >= 0).all()
        assert ticks['symbol'].iloc[0] == 'AAPL'

    def test_generate_price_series(self, generator):
        """The raw price walk has the requested length and stays positive."""
        n_points = 100
        series = generator._generate_price_series(n_points)

        assert isinstance(series, np.ndarray)
        assert len(series) == n_points
        assert (series > 0).all()

    def test_save_to_csv(self, generator):
        """Round-trip: a saved frame reloads with identical shape and columns."""
        frame = generator.generate_ohlcv_data(
            symbol='AAPL',
            start_date='2024-01-01',
            end_date='2024-01-01',
            frequency='1H'
        )

        with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
            csv_path = tmp_file.name

        try:
            generator.save_to_csv(frame, csv_path)

            # The file must exist and be non-empty after saving.
            assert os.path.exists(csv_path)
            assert os.path.getsize(csv_path) > 0

            reloaded = pd.read_csv(csv_path)
            assert len(reloaded) == len(frame)
            assert list(reloaded.columns) == list(frame.columns)
        finally:
            # Remove the temp file even if an assertion failed.
            if os.path.exists(csv_path):
                os.unlink(csv_path)

    def test_market_scenarios(self, generator):
        """Every named scenario yields data; a crash depresses the average price."""
        for scenario in ['normal', 'volatile', 'trending', 'crash']:
            frame = generator.generate_market_scenarios(scenario)

            assert isinstance(frame, pd.DataFrame)
            assert len(frame) > 0

            if scenario == 'crash':
                # A crash should pull the mean close well under the base price.
                assert frame['close'].mean() < generator.base_price * 0.9

    def test_invalid_frequency(self, generator):
        """An unknown frequency string raises a descriptive ValueError."""
        with pytest.raises(ValueError, match="Unsupported frequency"):
            generator.generate_ohlcv_data(frequency='invalid')

    def test_invalid_scenario(self, generator):
        """An unknown scenario name raises a descriptive ValueError."""
        with pytest.raises(ValueError, match="Unknown scenario type"):
            generator.generate_market_scenarios('invalid_scenario')

    def test_empty_date_range(self, generator):
        """A start date equal to the end date still yields at least one row."""
        frame = generator.generate_ohlcv_data(
            start_date='2024-01-01',
            end_date='2024-01-01',
            frequency='1D'
        )

        assert len(frame) >= 1

    def test_different_symbols(self, generator):
        """The requested ticker is echoed back in the generated data."""
        for ticker in ['AAPL', 'GOOGL', 'MSFT', 'TSLA']:
            frame = generator.generate_ohlcv_data(symbol=ticker)
            assert frame['symbol'].iloc[0] == ticker

    def test_price_consistency(self, generator):
        """All generated prices stay within 50% of the configured base price."""
        frame = generator.generate_ohlcv_data(
            start_date='2024-01-01',
            end_date='2024-01-02',
            frequency='1H'
        )

        price_cols = frame[['open', 'high', 'low', 'close']]
        lowest = price_cols.min().min()
        highest = price_cols.max().max()

        assert lowest > generator.base_price * 0.5
        assert highest < generator.base_price * 1.5

    def test_volume_correlation(self, generator):
        """Volume correlates with absolute open-to-close movement (not NaN)."""
        frame = generator.generate_ohlcv_data(
            start_date='2024-01-01',
            end_date='2024-01-02',
            frequency='1H'
        )

        frame['price_movement'] = abs(frame['close'] - frame['open'])

        corr = frame['volume'].corr(frame['price_movement'])
        assert not np.isnan(corr)