File size: 11,775 Bytes
859af74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63f74a3
859af74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63f74a3
859af74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63f74a3
859af74
 
 
 
 
63f74a3
859af74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63f74a3
 
 
859af74
 
 
 
 
 
 
63f74a3
859af74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
import copy
import os
import tempfile
from unittest.mock import MagicMock, patch

import numpy as np
import pandas as pd
import pytest

from agentic_ai_system.main import load_config
from agentic_ai_system.orchestrator import run, run_backtest, run_live_trading

class TestIntegration:
    """Integration tests for the entire trading system.

    Each test drives the public orchestrator entry points (``run``,
    ``run_backtest``, ``run_live_trading``) end-to-end against either
    synthetic data or a temporary CSV file, and asserts on the result
    dictionaries they return.
    """

    @pytest.fixture
    def config(self):
        """Sample configuration for integration testing.

        Returns a fresh nested dict per test (function-scoped fixture).
        Tests that tweak nested values must use ``copy.deepcopy`` before
        mutating, since ``dict.copy`` is shallow and would alias the
        nested sections.
        """
        return {
            'data_source': {
                'type': 'synthetic',
                'path': 'data/synthetic_market_data_test.csv'
            },
            'trading': {
                'symbol': 'AAPL',
                'timeframe': '1min',
                'capital': 100000
            },
            'risk': {
                'max_position': 100,
                'max_drawdown': 0.05
            },
            'execution': {
                'broker_api': 'paper',
                'order_size': 10,
                'delay_ms': 10,  # Fast for testing
                'success_rate': 1.0  # Always succeed for testing
            },
            'synthetic_data': {
                'base_price': 150.0,
                'volatility': 0.02,
                'trend': 0.001,
                'noise_level': 0.005,
                'data_path': 'data/synthetic_market_data_test.csv'
            },
            'logging': {
                'log_level': 'INFO',
                'log_dir': 'logs',
                'enable_console': True,
                'enable_file': True
            }
        }

    def test_full_workflow(self, config):
        """Test the complete trading workflow"""
        result = run(config)

        # Check result structure
        assert isinstance(result, dict)
        assert 'success' in result
        assert 'data_loaded' in result
        assert 'signal_generated' in result
        assert 'order_executed' in result
        assert 'execution_time' in result
        assert 'errors' in result

        # Check that data was loaded and a signal was generated
        assert result['data_loaded']
        assert result['signal_generated']

        # Check execution time is reasonable
        assert result['execution_time'] > 0
        assert result['execution_time'] < 60  # Should complete within 60 seconds

    def test_backtest_workflow(self, config):
        """Test the backtest workflow"""
        result = run_backtest(config, '2024-01-01', '2024-01-02')

        # Check result structure
        assert isinstance(result, dict)
        assert 'success' in result

        if result['success']:
            assert 'start_date' in result
            assert 'end_date' in result
            assert 'initial_capital' in result
            assert 'final_value' in result
            assert 'total_return' in result
            assert 'total_trades' in result
            assert 'trades' in result
            assert 'positions' in result

            # Check that backtest completed
            assert result['initial_capital'] == config['trading']['capital']
            assert result['final_value'] >= 0
            assert isinstance(result['total_return'], float)
            assert result['total_trades'] >= 0
            assert isinstance(result['trades'], list)
            assert isinstance(result['positions'], dict)

    def test_live_trading_workflow(self, config):
        """Test the live trading workflow (short duration)"""
        # Test with very short duration to avoid long test times
        result = run_live_trading(config, duration_minutes=1)

        # Check result structure
        assert isinstance(result, dict)
        assert 'success' in result

        if result['success']:
            assert 'duration_minutes' in result
            assert 'total_trades' in result
            assert 'trades' in result
            assert 'start_time' in result
            assert 'end_time' in result

            # Check that live trading completed
            assert result['duration_minutes'] == 1
            assert result['total_trades'] >= 0
            assert isinstance(result['trades'], list)

    def test_workflow_with_csv_data(self, config):
        """Test workflow with CSV data source"""
        # Generate sample data with correct column names
        dates = pd.date_range(start='2024-01-01', periods=100, freq='1min')
        rows = []
        for i, date in enumerate(dates):
            base_price = 150.0 + (i * 0.1)
            rows.append({
                'date': date,
                'open': base_price + np.random.normal(0, 1),
                'high': base_price + abs(np.random.normal(0, 2)),
                'low': base_price - abs(np.random.normal(0, 2)),
                'close': base_price + np.random.normal(0, 1),
                'volume': np.random.randint(1000, 100000)
            })

        # Close the temp-file handle before writing/reading through the path:
        # on Windows a file opened by NamedTemporaryFile cannot be reopened
        # while the original handle is still open.
        with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
            csv_path = tmp_file.name
        pd.DataFrame(rows).to_csv(csv_path, index=False)

        config['data_source']['type'] = 'csv'
        config['data_source']['path'] = csv_path

        try:
            result = run(config)

            assert result['success']
            assert result['data_loaded']
            assert result['signal_generated']

        finally:
            os.unlink(csv_path)

    def test_workflow_error_handling(self, config):
        """Test workflow error handling"""
        # Deep copy so mutating the nested 'data_source' section cannot
        # leak into the shared fixture dict (dict.copy is shallow).
        invalid_config = copy.deepcopy(config)
        invalid_config['data_source']['type'] = 'invalid_type'

        result = run(invalid_config)

        assert not result['success']
        assert len(result['errors']) > 0

    def test_backtest_with_different_periods(self, config):
        """Test backtest with different time periods"""
        # Test short period
        short_result = run_backtest(config, '2024-01-01', '2024-01-01')
        assert isinstance(short_result, dict)

        # Test longer period
        long_result = run_backtest(config, '2024-01-01', '2024-01-07')
        assert isinstance(long_result, dict)

        # Both should be valid results (success or failure)
        assert 'success' in short_result
        assert 'success' in long_result

    def test_system_with_different_symbols(self, config):
        """Test system with different trading symbols"""
        symbols = ['AAPL', 'GOOGL', 'MSFT', 'TSLA']

        for symbol in symbols:
            # Deep copy: each iteration must not mutate the shared fixture's
            # nested 'trading' section.
            test_config = copy.deepcopy(config)
            test_config['trading']['symbol'] = symbol

            result = run(test_config)

            assert result['success']
            assert result['data_loaded']
            assert result['signal_generated']

    def test_system_with_different_capital_amounts(self, config):
        """Test system with different capital amounts"""
        capital_amounts = [10000, 50000, 100000, 500000]

        for capital in capital_amounts:
            # Deep copy to keep each run independent of the fixture dict.
            test_config = copy.deepcopy(config)
            test_config['trading']['capital'] = capital

            result = run(test_config)

            assert result['success']
            assert result['data_loaded']
            assert result['signal_generated']

    def test_execution_failure_simulation(self, config):
        """Test system behavior with execution failures"""
        # Set success rate to 0 to simulate all failures; deep copy so the
        # nested 'execution' section of the fixture is not mutated.
        test_config = copy.deepcopy(config)
        test_config['execution']['success_rate'] = 0.0

        result = run(test_config)

        # System should still complete workflow
        assert result['success']
        assert result['data_loaded']
        assert result['signal_generated']

        # If a non-hold order was executed, it should fail with success_rate = 0.0
        # But if only hold signals were generated, no orders would be executed
        if result['order_executed'] and result.get('execution_result', {}).get('action') != 'hold':
            assert result['execution_result']['success'] == False

    def test_data_validation_integration(self, config):
        """Test data validation integration"""
        # Create invalid data (high < open, which a validator should reject)
        invalid_data = pd.DataFrame({
            'date': pd.date_range('2024-01-01', periods=10, freq='1min'),
            'open': [150] * 10,
            'high': [145] * 10,  # Invalid: high < open
            'low': [145] * 10,
            'close': [152] * 10,
            'volume': [1000] * 10
        })

        # Close the handle before writing/reading (Windows compatibility).
        with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp_file:
            csv_path = tmp_file.name
        invalid_data.to_csv(csv_path, index=False)

        config['data_source']['type'] = 'csv'
        config['data_source']['path'] = csv_path

        try:
            result = run(config)

            # System should still work (fallback to synthetic data)
            assert result['success']

        finally:
            os.unlink(csv_path)

    def test_performance_metrics(self, config):
        """Test that performance metrics are calculated correctly"""
        result = run_backtest(config, '2024-01-01', '2024-01-03')

        if result['success']:
            # Check that return is calculated correctly
            initial_capital = result['initial_capital']
            final_value = result['final_value']
            calculated_return = (final_value - initial_capital) / initial_capital

            assert abs(result['total_return'] - calculated_return) < 0.001

            # Check that trade count is reasonable
            assert result['total_trades'] >= 0

    def test_config_loading(self):
        """Test configuration loading functionality"""
        # Test with valid config
        config_content = """
data_source:
  type: 'synthetic'
  path: 'data/market_data.csv'

trading:
  symbol: 'AAPL'
  timeframe: '1min'
  capital: 100000

risk:
  max_position: 100
  max_drawdown: 0.05

execution:
  broker_api: 'paper'
  order_size: 10
"""
        # Write and close before loading so the file can be reopened on
        # any platform (an open NamedTemporaryFile is locked on Windows).
        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp_file:
            tmp_file.write(config_content)
            yaml_path = tmp_file.name

        try:
            config = load_config(yaml_path)

            assert config['data_source']['type'] == 'synthetic'
            assert config['trading']['symbol'] == 'AAPL'
            assert config['trading']['capital'] == 100000

        finally:
            os.unlink(yaml_path)

    def test_system_scalability(self, config):
        """Test system scalability with larger datasets"""
        # Test with larger synthetic dataset; deep copy so the nested
        # 'synthetic_data' section of the fixture is not mutated.
        test_config = copy.deepcopy(config)
        test_config['synthetic_data']['base_price'] = 200.0
        test_config['synthetic_data']['volatility'] = 0.03

        result = run(test_config)

        assert result['success']
        assert result['data_loaded']
        assert result['signal_generated']

        # Check execution time is reasonable
        assert result['execution_time'] < 30  # Should complete within 30 seconds