Merge pull request #12 from EAName/main
Changed files:
- env.example → .env.example (+0 -0)
- .gitignore (+81 -37)
- agentic_ai_system/__init__.py (+0 -0)
- agentic_ai_system/finrl_agent.py (+27 -29)
- data/.gitkeep (+0 -0)
- finrl_demo.py (+49 -38)
- models/.gitkeep (+0 -0)
- plots/.gitkeep (+0 -0)
- CURSOR_PR_REVIEW_GUIDE.md → scripts/CURSOR_PR_REVIEW_GUIDE.md (+0 -0)
- HUGGINGFACE_PROTECTION.md → scripts/HUGGINGFACE_PROTECTION.md (+0 -0)
- review_dependabot_prs.sh → scripts/review_dependabot_prs.sh (+0 -0)
- review_log.txt → scripts/review_log.txt (+0 -0)
- setup_branch_protection.sh → scripts/setup_branch_protection.sh (+0 -0)
env.example → .env.example
RENAMED (file without changes)
.gitignore
CHANGED

@@ -1,8 +1,12 @@
-# …
+# Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
 *$py.class
+
+# C extensions
 *.so
+
+# Distribution / packaging
 .Python
 build/
 develop-eggs/
@@ -15,13 +19,14 @@ lib64/
 parts/
 sdist/
 var/
-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
 MANIFEST

 # PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
 *.manifest
 *.spec

@@ -32,6 +37,7 @@ pip-delete-this-directory.txt
 # Unit test / coverage reports
 htmlcov/
 .tox/
+.nox/
 .coverage
 .coverage.*
 .cache
@@ -41,61 +47,99 @@ coverage.xml
 .hypothesis/
 .pytest_cache/

+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+/docs/_build/
+
+# PyBuilder
+.target/
+
 # Jupyter Notebook
 .ipynb_checkpoints
+notebooks/*.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py

 # pyenv
 .python-version

-# …
-…
+# pipenv
+Pipfile.lock
+
+# poetry
+poetry.lock
+
+# mypy
+.mypy_cache/
+.dmypy.json
+
+# Pyre type checker
+.pyre/

-# …
-…
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# VS Code
+.vscode/

 # Environments
 .env
-.…
-…
+.env.*
+.venv/
 venv/
 ENV/
 env.bak/
 venv.bak/

-# …
-…
-.…
-…
-# Rope project settings
-.ropeproject
-…
-# mkdocs documentation
-/site
+# Docker
+*.pid
+*.pid.lock

-# …
-.mypy_cache/
-.dmypy.json
-dmypy.json
-…
-# Project specific
+# Logs
 logs/
-data/
 *.log
+
+# Data, models, and plots (keep .gitkeep)
+data/*
+!data/.gitkeep
+models/*
+!models/.gitkeep
+plots/*
+!plots/.gitkeep
+
+# Ignore large files
+*.h5
+*.hdf5
+*.pkl
+*.joblib
 *.csv
-*.…
+*.zip
+*.tar.gz
+*.ckpt

-# …
-.…
-.…
-*.swp
-*.swo
-*~
+# Allow config and example env
+!config.yaml
+!.env.example

-# …
+# Misc
 .DS_Store
-.DS_Store?
-._*
-.Spotlight-V100
-.Trashes
-ehthumbs.db
 Thumbs.db
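The data/, models/, and plots/ rules above use gitignore negation to keep empty directories in the tree: everything inside each directory is ignored, but the .gitkeep placeholder is re-included. Negation works here because data/* excludes the directory's contents rather than the directory itself. A minimal sketch of how to check the pattern from Python, assuming it runs at the root of a checkout; is_ignored() is a hypothetical helper, not part of this PR:

    # `git check-ignore -q` exits 0 when a path matches an ignore rule.
    import subprocess

    def is_ignored(path: str) -> bool:
        return subprocess.run(["git", "check-ignore", "-q", path]).returncode == 0

    print(is_ignored("data/output.csv"))  # True: matched by data/* (and *.csv)
    print(is_ignored("data/.gitkeep"))    # False: re-included by !data/.gitkeep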
agentic_ai_system/__init__.py
ADDED (file without changes)
agentic_ai_system/finrl_agent.py
CHANGED

@@ -18,6 +18,7 @@ import logging
 from typing import Dict, List, Tuple, Optional, Any
 from dataclasses import dataclass
 import yaml
+import inspect

 logger = logging.getLogger(__name__)

@@ -254,7 +255,18 @@ class FinRLAgent:
         self.eval_env = None
         self.callback = None

-        logger.info(f"Initializing FinRL agent with algorithm: {config.algorithm}")
+        logger.info(f"Initializing FinRL agent with algorithm: {self.config.algorithm}")
+
+    def _get_valid_kwargs(self, algo_class):
+        """Return a dict of config fields valid for the given algorithm class, excluding tensorboard_log."""
+        sig = inspect.signature(algo_class.__init__)
+        valid_keys = set(sig.parameters.keys())
+        # Exclude 'self', 'policy', and 'tensorboard_log', which are always passed explicitly
+        valid_keys.discard('self')
+        valid_keys.discard('policy')
+        valid_keys.discard('tensorboard_log')
+        # Build kwargs from the config dataclass
+        return {k: getattr(self.config, k) for k in self.config.__dataclass_fields__ if k in valid_keys}

     def create_environment(self, data: pd.DataFrame, config: Dict[str, Any],
                            initial_balance: float = 100000, use_real_broker: bool = False) -> TradingEnvironment:
@@ -335,54 +347,35 @@ class FinRLAgent:

         # Initialize model based on algorithm
         if self.config.algorithm == "PPO":
+            algo_kwargs = self._get_valid_kwargs(PPO)
             self.model = PPO(
                 "MlpPolicy",
                 self.env,
-                …
-                batch_size=self.config.batch_size,
-                buffer_size=self.config.buffer_size,
-                learning_starts=self.config.learning_starts,
-                gamma=self.config.gamma,
-                train_freq=self.config.train_freq,
-                gradient_steps=self.config.gradient_steps,
-                verbose=self.config.verbose,
+                **algo_kwargs,
                 tensorboard_log=self.config.tensorboard_log
             )
         elif self.config.algorithm == "A2C":
+            algo_kwargs = self._get_valid_kwargs(A2C)
             self.model = A2C(
                 "MlpPolicy",
                 self.env,
-                …
-                gamma=self.config.gamma,
-                verbose=self.config.verbose,
+                **algo_kwargs,
                 tensorboard_log=self.config.tensorboard_log
             )
         elif self.config.algorithm == "DDPG":
+            algo_kwargs = self._get_valid_kwargs(DDPG)
             self.model = DDPG(
                 "MlpPolicy",
                 self.env,
-                …
-                buffer_size=self.config.buffer_size,
-                learning_starts=self.config.learning_starts,
-                gamma=self.config.gamma,
-                tau=self.config.tau,
-                train_freq=self.config.train_freq,
-                gradient_steps=self.config.gradient_steps,
-                verbose=self.config.verbose,
+                **algo_kwargs,
                 tensorboard_log=self.config.tensorboard_log
             )
         elif self.config.algorithm == "TD3":
+            algo_kwargs = self._get_valid_kwargs(TD3)
             self.model = TD3(
                 "MlpPolicy",
                 self.env,
-                …
-                buffer_size=self.config.buffer_size,
-                learning_starts=self.config.learning_starts,
-                gamma=self.config.gamma,
-                tau=self.config.tau,
-                train_freq=self.config.train_freq,
-                gradient_steps=self.config.gradient_steps,
-                verbose=self.config.verbose,
+                **algo_kwargs,
                 tensorboard_log=self.config.tensorboard_log
             )
         else:
@@ -660,4 +653,9 @@ class FinRLAgent:
         ema_fast = prices.ewm(span=fast).mean()
         ema_slow = prices.ewm(span=slow).mean()
         macd = ema_fast - ema_slow
-        return macd
+        return macd
+
+
+def create_finrl_agent_from_config(config: FinRLConfig) -> FinRLAgent:
+    """Create a FinRL agent from configuration"""
+    return FinRLAgent(config)
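The change above replaces per-algorithm keyword lists with signature-driven filtering: PPO's constructor does not accept off-policy arguments such as buffer_size or train_freq, so passing the full config to every algorithm raises TypeError, and _get_valid_kwargs forwards only the fields each constructor declares. A standalone sketch of the same pattern, where DemoConfig and DemoAlgo are hypothetical stand-ins for FinRLConfig and a Stable-Baselines3 class:

    import inspect
    from dataclasses import dataclass

    @dataclass
    class DemoConfig:
        gamma: float = 0.99
        buffer_size: int = 10_000  # off-policy only; on-policy constructors reject it
        verbose: int = 1

    class DemoAlgo:
        # On-policy style constructor: no buffer_size parameter.
        def __init__(self, policy, env, gamma=0.99, verbose=0, tensorboard_log=None):
            self.gamma, self.verbose = gamma, verbose

    def valid_kwargs(config, algo_class):
        # Forward only the config fields the constructor actually accepts.
        params = set(inspect.signature(algo_class.__init__).parameters)
        params -= {"self", "policy", "tensorboard_log"}
        return {k: getattr(config, k) for k in config.__dataclass_fields__ if k in params}

    print(valid_kwargs(DemoConfig(), DemoAlgo))  # {'gamma': 0.99, 'verbose': 1}

One trade-off of this approach: a misspelled config field is silently dropped instead of failing fast, so logging the discarded keys may be worth considering.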
data/.gitkeep
ADDED (file without changes)
finrl_demo.py
CHANGED

@@ -127,15 +127,14 @@ def train_finrl_agent(config: dict, train_data: pd.DataFrame, test_data: pd.DataFrame):
     logger.info("Starting FinRL agent training")

     # Create FinRL agent
-    finrl_config = FinRLConfig(**config['finrl'])
+    finrl_config = FinRLConfig(**{k: v for k, v in config['finrl'].items() if k in FinRLConfig.__dataclass_fields__})
     agent = FinRLAgent(finrl_config)

     # Train the agent
     training_result = agent.train(
         data=train_data,
-        …
-        …
-        eval_data=test_data
+        config=config,
+        total_timesteps=config['finrl']['training']['total_timesteps']
     )

     logger.info(f"Training completed: {training_result}")
@@ -149,27 +148,31 @@
     return agent


-def evaluate_agent(agent: FinRLAgent, test_data: pd.DataFrame) -> dict:
+def evaluate_agent(agent: FinRLAgent, test_data: pd.DataFrame, config: dict) -> dict:
     """Evaluate the trained agent"""
     logger.info("Evaluating FinRL agent")

     # Evaluate on test data
-    evaluation_results = agent.evaluate(test_data)
+    evaluation_results = agent.evaluate(test_data, config)

     logger.info(f"Evaluation results: {evaluation_results}")

     return evaluation_results


-def generate_predictions(agent: FinRLAgent, test_data: pd.DataFrame) -> list:
+def generate_predictions(agent: FinRLAgent, test_data: pd.DataFrame, config: dict) -> list:
     """Generate trading predictions"""
     logger.info("Generating trading predictions")

-    …
+    prediction_results = agent.predict(test_data, config)

-    …
-    …
-    …
+    if prediction_results['success']:
+        predictions = prediction_results['actions']
+        logger.info(f"Generated {len(predictions)} predictions")
+        return predictions
+    else:
+        logger.error(f"Prediction failed: {prediction_results['error']}")
+        return []


 def plot_results(test_data: pd.DataFrame, predictions: list, evaluation_results: dict):
@@ -182,16 +185,17 @@ def plot_results(test_data: pd.DataFrame, predictions: list, evaluation_results: dict):
     # Plot 1: Price and predictions
     axes[0].plot(test_data.index, test_data['close'], label='Close Price', alpha=0.7)

-    # Mark buy/sell signals
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    # Mark buy/sell signals only if predictions are available
+    if predictions:
+        buy_signals = [i for i, pred in enumerate(predictions) if pred == 2]
+        sell_signals = [i for i, pred in enumerate(predictions) if pred == 0]
+
+        if buy_signals:
+            axes[0].scatter(test_data.index[buy_signals], test_data['close'].iloc[buy_signals],
+                            color='green', marker='^', s=100, label='Buy Signal', alpha=0.8)
+        if sell_signals:
+            axes[0].scatter(test_data.index[sell_signals], test_data['close'].iloc[sell_signals],
+                            color='red', marker='v', s=100, label='Sell Signal', alpha=0.8)

     axes[0].set_title('Price Action and Trading Signals')
     axes[0].set_ylabel('Price')
@@ -236,23 +240,30 @@ def print_summary(evaluation_results: dict, predictions: list):
     print("FINRL TRADING SYSTEM SUMMARY")
     print("="*60)

-    …
-    …
-    …
-    …
-    …
-    …
+    if evaluation_results.get('success', False):
+        print(f"Algorithm: {evaluation_results.get('algorithm', 'Unknown')}")
+        print(f"Total Return: {evaluation_results.get('total_return', 0):.2%}")
+        print(f"Final Portfolio Value: ${evaluation_results.get('final_portfolio_value', 0):,.2f}")
+        print(f"Total Reward: {evaluation_results.get('total_reward', 0):.4f}")
+        print(f"Sharpe Ratio: {evaluation_results.get('sharpe_ratio', 0):.4f}")
+        print(f"Number of Trading Steps: {evaluation_results.get('steps', 0)}")
+        print(f"Max Drawdown: {evaluation_results.get('max_drawdown', 0):.2%}")
+    else:
+        print(f"Evaluation failed: {evaluation_results.get('error', 'Unknown error')}")

     # Trading statistics
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    if predictions:
+        buy_signals = sum(1 for pred in predictions if pred == 2)
+        sell_signals = sum(1 for pred in predictions if pred == 0)
+        hold_signals = sum(1 for pred in predictions if pred == 1)
+
+        print(f"\nTrading Signals:")
+        print(f"  Buy signals: {buy_signals}")
+        print(f"  Sell signals: {sell_signals}")
+        print(f"  Hold signals: {hold_signals}")
+        print(f"  Total signals: {len(predictions)}")
+    else:
+        print(f"\nNo trading predictions available")

     print("\n" + "="*60)

@@ -273,10 +284,10 @@
     agent = train_finrl_agent(config, train_data, test_data)

     # Evaluate agent
-    evaluation_results = evaluate_agent(agent, test_data)
+    evaluation_results = evaluate_agent(agent, test_data, config)

     # Generate predictions
-    predictions = generate_predictions(agent, test_data)
+    predictions = generate_predictions(agent, test_data, config)

     # Create visualizations
     plot_results(test_data, predictions, evaluation_results)
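Two small patterns in the demo changes are worth noting: the FinRLConfig call filters the raw YAML dict down to known dataclass fields, so a nested training section can no longer break the constructor, and the prediction lists use a discrete action encoding of 0 = sell, 1 = hold, 2 = buy. A minimal sketch, with DemoFinRLConfig as a hypothetical stand-in for FinRLConfig:

    from collections import Counter
    from dataclasses import dataclass

    @dataclass
    class DemoFinRLConfig:
        algorithm: str = "PPO"
        gamma: float = 0.99

    raw = {"algorithm": "TD3", "gamma": 0.98,
           "training": {"total_timesteps": 100_000}}  # nested block, not a field

    # Keep only the keys that are dataclass fields.
    cfg = DemoFinRLConfig(**{k: v for k, v in raw.items()
                             if k in DemoFinRLConfig.__dataclass_fields__})
    print(cfg)  # DemoFinRLConfig(algorithm='TD3', gamma=0.98)

    # Count signals the same way print_summary does (0 = sell, 1 = hold, 2 = buy).
    counts = Counter([2, 1, 1, 0, 2])
    print(counts[2], counts[1], counts[0])  # 2 buys, 2 holds, 1 sell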
models/.gitkeep
ADDED (file without changes)

plots/.gitkeep
ADDED (file without changes)

CURSOR_PR_REVIEW_GUIDE.md → scripts/CURSOR_PR_REVIEW_GUIDE.md
RENAMED (file without changes)

HUGGINGFACE_PROTECTION.md → scripts/HUGGINGFACE_PROTECTION.md
RENAMED (file without changes)

review_dependabot_prs.sh → scripts/review_dependabot_prs.sh
RENAMED (file without changes)

review_log.txt → scripts/review_log.txt
RENAMED (file without changes)

setup_branch_protection.sh → scripts/setup_branch_protection.sh
RENAMED (file without changes)