Sigrid De los Santos committed
Commit 3e4bf85 · 1 Parent(s): 8143a2a

Remove remaining binary file for Hugging Face
Files changed (2):
  1. app.py +70 -52
  2. src/main.py +56 -68
app.py CHANGED
@@ -4,7 +4,6 @@ import tempfile
 import streamlit as st
 import pandas as pd
 from io import StringIO
-import contextlib
 
 # Add 'src' to Python path so we can import main.py
 sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
@@ -24,7 +23,7 @@ topics_data = []
 
 with st.form("topics_form"):
     topic_count = st.number_input("How many topics?", min_value=1, max_value=10, value=1, step=1)
-
+
     for i in range(topic_count):
         col1, col2 = st.columns(2)
         with col1:
@@ -48,43 +47,33 @@ if submitted:
             df.to_csv(tmp_csv.name, index=False)
             csv_path = tmp_csv.name
 
-        progress_placeholder = st.empty()
-        log_output = st.empty()
-        string_buffer = StringIO()
-
-        def write_log(msg):
-            print(msg)  # Will go to final log
-            progress_placeholder.markdown(f"🔄 {msg}")
-
-        with contextlib.redirect_stdout(string_buffer):
-            write_log("🚀 Starting analysis...")
-            output_path = run_pipeline(csv_path, tavily_api_key)
-            write_log("✅ Finished analysis.")
-
-        logs = string_buffer.getvalue()
-        progress_placeholder.empty()  # Clear ephemeral log
-        log_output.code(logs)  # Show final full log
-
-        if output_path and isinstance(output_path, list):
-            st.success("✅ Analysis complete!")
-
-            for path in output_path:
-                if os.path.exists(path):
-                    with open(path, 'r', encoding='utf-8') as file:
-                        html_content = file.read()
-                    filename = os.path.basename(path)
-
-                    st.download_button(
-                        label=f"📥 Download {filename}",
-                        data=html_content,
-                        file_name=filename,
-                        mime="text/html"
-                    )
-                    st.components.v1.html(html_content, height=600, scrolling=True)
-        else:
-            st.error("❌ No reports were generated.")
+        progress_box = st.empty()
+
+        def show_progress(msg):
+            progress_box.markdown(f"⏳ {msg}")
+
+        try:
+            output_path = run_pipeline(csv_path, tavily_api_key, progress_callback=show_progress)
+            progress_box.success("✅ Analysis complete!")
+
+            if output_path and isinstance(output_path, list):
+                for path in output_path:
+                    if os.path.exists(path):
+                        with open(path, 'r', encoding='utf-8') as file:
+                            html_content = file.read()
+                        filename = os.path.basename(path)
+
+                        st.download_button(
+                            label=f"📥 Download {filename}",
+                            data=html_content,
+                            file_name=filename,
+                            mime="text/html"
+                        )
+                        st.components.v1.html(html_content, height=600, scrolling=True)
+            else:
+                st.error("❌ No reports were generated.")
+        except Exception as e:
+            progress_box.error(f"❌ Error: {e}")
 
 
 # import os
@@ -92,12 +81,15 @@ if submitted:
 # import tempfile
 # import streamlit as st
 # import pandas as pd
+# from io import StringIO
+# import contextlib
 
 # # Add 'src' to Python path so we can import main.py
 # sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
 # from main import run_pipeline
 
-# st.title("AI-Powered Investing News Analyzer")
+# st.set_page_config(page_title="📰 AI News Analyzer", layout="wide")
+# st.title("🧠 AI-Powered Investing News Analyzer")
 
 # # === API Key Input ===
 # st.subheader("🔐 API Keys")
@@ -105,45 +97,71 @@ if submitted:
 # tavily_api_key = st.text_input("Tavily API Key", type="password").strip()
 
 # # === Topic Input ===
-# st.subheader("📰 Topics of Interest")
+# st.subheader("📈 Topics of Interest")
 # topics_data = []
 
 # with st.form("topics_form"):
-#     topic_count = st.number_input("How many topics do you want to analyze?", min_value=1, max_value=10, step=1, value=1)
-
+#     topic_count = st.number_input("How many topics?", min_value=1, max_value=10, value=1, step=1)
+
 #     for i in range(topic_count):
 #         col1, col2 = st.columns(2)
 #         with col1:
 #             topic = st.text_input(f"Topic {i+1}", key=f"topic_{i}")
 #         with col2:
-#             timespan = st.number_input(f"Timespan (days) for Topic {i+1}", min_value=1, max_value=30, value=7, key=f"days_{i}")
-#         topics_data.append({"topic": topic, "timespan_days": timespan})
+#             days = st.number_input(f"Timespan (days)", min_value=1, max_value=30, value=7, key=f"days_{i}")
+#         topics_data.append({"topic": topic, "timespan_days": days})
 
-#     submitted = st.form_submit_button("Analyze Topics")
+#     submitted = st.form_submit_button("Run Analysis")
 
-# # === Run pipeline on submit ===
+# # === Submission logic ===
 # if submitted:
 #     if not openai_api_key or not tavily_api_key or not all([td['topic'] for td in topics_data]):
 #         st.warning("Please fill in all fields.")
 #     else:
-#         # Set environment variables so downstream modules can use them
 #         os.environ["OPENAI_API_KEY"] = openai_api_key
 #         os.environ["TAVILY_API_KEY"] = tavily_api_key
 
-#         # Save user topics to temp CSV
 #         df = pd.DataFrame(topics_data)
 #         with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmp_csv:
 #             df.to_csv(tmp_csv.name, index=False)
 #             csv_path = tmp_csv.name
 
-#         with st.spinner("Running analysis..."):
+#         progress_placeholder = st.empty()
+#         log_output = st.empty()
+#         string_buffer = StringIO()
+
+#         def write_log(msg):
+#             print(msg)  # Will go to final log
+#             progress_placeholder.markdown(f"🔄 {msg}")
+
+#         with contextlib.redirect_stdout(string_buffer):
+#             write_log("🚀 Starting analysis...")
 #             output_path = run_pipeline(csv_path, tavily_api_key)
+#             write_log("✅ Finished analysis.")
+
+#         logs = string_buffer.getvalue()
+#         progress_placeholder.empty()  # Clear ephemeral log
+#         log_output.code(logs)  # Show final full log
 
-#         if os.path.exists(output_path):
+#         if output_path and isinstance(output_path, list):
 #             st.success("✅ Analysis complete!")
-#             with open(output_path, 'r', encoding='utf-8') as file:
-#                 html_content = file.read()
-#             st.download_button("📥 Download HTML Report", html_content, file_name="news_report.html", mime="text/html")
-#             st.components.v1.html(html_content, height=600, scrolling=True)
+
+#             for path in output_path:
+#                 if os.path.exists(path):
+#                     with open(path, 'r', encoding='utf-8') as file:
+#                         html_content = file.read()
+#                     filename = os.path.basename(path)
+
+#                     st.download_button(
+#                         label=f"📥 Download {filename}",
+#                         data=html_content,
+#                         file_name=filename,
+#                         mime="text/html"
+#                     )
+#                     st.components.v1.html(html_content, height=600, scrolling=True)
 #         else:
-#             st.error("❌ Something went wrong during the analysis.")
+#             st.error("❌ No reports were generated.")
+
+
src/main.py CHANGED
@@ -2,15 +2,13 @@ import os
 import sys
 from datetime import datetime
 from dotenv import load_dotenv
+import pandas as pd
 
 from image_search import search_unsplash_image
 from md_html import convert_single_md_to_html as convert_md_to_html
 from news_analysis import fetch_deep_news, generate_value_investor_report
-
-import pandas as pd
 from csv_utils import detect_changes
 
-
 # Setup paths
 BASE_DIR = os.path.dirname(os.path.dirname(__file__))  # one level up from src/
 DATA_DIR = os.path.join(BASE_DIR, "data")
@@ -32,14 +30,16 @@ def build_metrics_box(topic, num_articles):
     >
     """
 
-def run_value_investing_analysis(csv_path):
+def run_value_investing_analysis(csv_path, progress_callback=None):
     current_df = pd.read_csv(csv_path)
     prev_path = os.path.join(BASE_DIR, "investing_topics_prev.csv")
+
     if os.path.exists(prev_path):
         previous_df = pd.read_csv(prev_path)
         changed_df = detect_changes(current_df, previous_df)
         if changed_df.empty:
-            print("✅ No changes detected. Skipping processing.")
+            if progress_callback:
+                progress_callback("✅ No changes detected. Skipping processing.")
             return []
     else:
         changed_df = current_df
@@ -49,20 +49,24 @@ def run_value_investing_analysis(csv_path):
     for _, row in changed_df.iterrows():
         topic = row.get("topic")
         timespan = row.get("timespan_days", 7)
-        print(f"\n🔍 Processing: {topic} ({timespan} days)")
+
+        if progress_callback:
+            progress_callback(f"🔍 Processing: {topic} ({timespan} days)")
 
         news = fetch_deep_news(topic, timespan)
         if not news:
-            print(f"⚠️ No news found for: {topic}")
+            if progress_callback:
+                progress_callback(f"⚠️ No news found for: {topic}")
             continue
 
-        report_body = generate_value_investor_report(topic, news)
-        from image_search import search_unsplash_image
+        if progress_callback:
+            progress_callback(f"🧠 Analyzing news for: {topic}")
 
-        # Later inside your loop
-        image_url, image_credit = search_unsplash_image(topic)
+        report_body = generate_value_investor_report(topic, news)
 
-        #image_url, image_credit = search_unsplash_image(topic, os.getenv("OPENAI_API_KEY"))
+        # Use placeholder image instead of API call
+        image_url = "https://via.placeholder.com/1281x721?text=No+Image"
+        image_credit = "Image unavailable"
 
         metrics_md = build_metrics_box(topic, len(news))
         full_md = metrics_md + report_body
@@ -77,76 +81,67 @@ def run_value_investing_analysis(csv_path):
             filepath = os.path.join(DATA_DIR, filename)
             counter += 1
 
+        if progress_callback:
+            progress_callback(f"📝 Saving markdown for: {topic}")
+
         with open(filepath, "w", encoding="utf-8") as f:
             f.write(full_md)
 
         new_md_files.append(filepath)
 
-    print(f"✅ Markdown saved to: {DATA_DIR}")
+    if progress_callback:
+        progress_callback(f"✅ Markdown reports saved to: `{DATA_DIR}`")
+
     current_df.to_csv(prev_path, index=False)
     return new_md_files
 
-
-def run_pipeline(csv_path, tavily_api_key):
+def run_pipeline(csv_path, tavily_api_key, progress_callback=None):
     os.environ["TAVILY_API_KEY"] = tavily_api_key
 
-    new_md_files = run_value_investing_analysis(csv_path)
+    new_md_files = run_value_investing_analysis(csv_path, progress_callback)
    new_html_paths = []
 
     for md_path in new_md_files:
+        if progress_callback:
+            progress_callback(f"🌐 Converting to HTML: {os.path.basename(md_path)}")
+
         convert_md_to_html(md_path, HTML_DIR)
         html_path = os.path.join(HTML_DIR, os.path.basename(md_path).replace(".md", ".html"))
         new_html_paths.append(html_path)
 
     return new_html_paths
 
-
 if __name__ == "__main__":
     md_files = run_value_investing_analysis(CSV_PATH)
     for md in md_files:
         convert_md_to_html(md, HTML_DIR)
     print(f"🌐 All reports converted to HTML at: {HTML_DIR}")
 
-
 # import os
 # import sys
 # from datetime import datetime
 # from dotenv import load_dotenv
 
-# #rom news_analysis import load_csv, fetch_deep_news, generate_value_investor_report
 # from image_search import search_unsplash_image
-# from md_html import convert_md_folder_to_html
 # from md_html import convert_single_md_to_html as convert_md_to_html
-
-
 # from news_analysis import fetch_deep_news, generate_value_investor_report
 
 # import pandas as pd
 # from csv_utils import detect_changes
 
 
-# # Adds the absolute path of /external to your module path
-# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-# EXTERNAL_PATH = os.path.join(BASE_DIR, "external")
-# if EXTERNAL_PATH not in sys.path:
-#     sys.path.append(EXTERNAL_PATH)
-
-# # Load .env
-# load_dotenv()
-
-# # === Base Folder Setup ===
+# # Setup paths
 # BASE_DIR = os.path.dirname(os.path.dirname(__file__))  # one level up from src/
 # DATA_DIR = os.path.join(BASE_DIR, "data")
 # HTML_DIR = os.path.join(BASE_DIR, "html")
 # CSV_PATH = os.path.join(BASE_DIR, "investing_topics.csv")
 
-
-
-# # Ensure output folders exist
 # os.makedirs(DATA_DIR, exist_ok=True)
 # os.makedirs(HTML_DIR, exist_ok=True)
 
-# # === Metrics Block ===
+# # Load .env
+# load_dotenv()
+
 # def build_metrics_box(topic, num_articles):
 #     now = datetime.now().strftime("%Y-%m-%d %H:%M")
 #     return f"""
@@ -156,20 +151,20 @@ if __name__ == "__main__":
 #     >
 #     """
 
-# # === Main Logic ===
 # def run_value_investing_analysis(csv_path):
 #     current_df = pd.read_csv(csv_path)
-
 #     prev_path = os.path.join(BASE_DIR, "investing_topics_prev.csv")
 #     if os.path.exists(prev_path):
 #         previous_df = pd.read_csv(prev_path)
 #         changed_df = detect_changes(current_df, previous_df)
 #         if changed_df.empty:
 #             print("✅ No changes detected. Skipping processing.")
-#             return
+#             return []
 #         else:
 #             changed_df = current_df
 
+#     new_md_files = []
+
 #     for _, row in changed_df.iterrows():
 #         topic = row.get("topic")
 #         timespan = row.get("timespan_days", 7)
@@ -181,7 +176,13 @@ if __name__ == "__main__":
 #             continue
 
 #         report_body = generate_value_investor_report(topic, news)
+#         from image_search import search_unsplash_image
+
+#         # Later inside your loop
 #         image_url, image_credit = search_unsplash_image(topic)
+
+#         #image_url, image_credit = search_unsplash_image(topic, os.getenv("OPENAI_API_KEY"))
+
 #         metrics_md = build_metrics_box(topic, len(news))
 #         full_md = metrics_md + report_body
 
@@ -198,44 +199,31 @@ if __name__ == "__main__":
 #         with open(filepath, "w", encoding="utf-8") as f:
 #             f.write(full_md)
 
+#         new_md_files.append(filepath)
+
 #     print(f"✅ Markdown saved to: {DATA_DIR}")
-#     current_df.to_csv(prev_path, index=False)  # Save current as previous for next run
+#     current_df.to_csv(prev_path, index=False)
+#     return new_md_files
 
-#     #convert_md_folder_to_html(DATA_DIR, HTML_DIR)
-#     #print(f"🌐 All reports converted to HTML at: {HTML_DIR}")
 
-# # === Streamlit Integration Wrapper ===
 # def run_pipeline(csv_path, tavily_api_key):
-#     """
-#     Runs the full analysis pipeline for Streamlit.
-
-#     Returns:
-#         str: Path to the generated HTML report.
-#     """
 #     os.environ["TAVILY_API_KEY"] = tavily_api_key
 
-#     run_value_investing_analysis(csv_path)
+#     new_md_files = run_value_investing_analysis(csv_path)
+#     new_html_paths = []
 
-#     # Combine all generated markdown into one file
-#     combined_md_path = os.path.join(DATA_DIR, "combined_report.md")
-#     with open(combined_md_path, "w", encoding="utf-8") as outfile:
-#         for fname in os.listdir(DATA_DIR):
-#             if fname.endswith(".md"):
-#                 with open(os.path.join(DATA_DIR, fname), "r", encoding="utf-8") as f:
-#                     outfile.write(f.read() + "\n\n---\n\n")
+#     for md_path in new_md_files:
+#         convert_md_to_html(md_path, HTML_DIR)
+#         html_path = os.path.join(HTML_DIR, os.path.basename(md_path).replace(".md", ".html"))
+#         new_html_paths.append(html_path)
 
-#     # Convert to HTML
-#     # html_output_path = os.path.join(HTML_DIR, "news_report.html")
-#     # convert_md_to_html(combined_md_path, html_output_path)
-#     convert_md_to_html(combined_md_path, HTML_DIR)
-#     html_output_path = os.path.join(HTML_DIR, "combined_report.html")
+#     return new_html_paths
 
 
-#     return html_output_path
-
-
-# # === Run ===
 # if __name__ == "__main__":
-#     run_value_investing_analysis(CSV_PATH)
-#     convert_md_folder_to_html(DATA_DIR, HTML_DIR)
+#     md_files = run_value_investing_analysis(CSV_PATH)
+#     for md in md_files:
+#         convert_md_to_html(md, HTML_DIR)
 #     print(f"🌐 All reports converted to HTML at: {HTML_DIR}")
+
+
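src/main.py now guards every status message with `if progress_callback:`. A sketch of the equivalent no-op-callback pattern that keeps each call site to one line; this is a possible refactoring, not code from the commit:

```python
# Sketch only: substitute a do-nothing callback once at the top of the
# function so the repeated `if progress_callback:` guards collapse.
def _noop(_msg):
    pass

def run_value_investing_analysis(csv_path, progress_callback=None):
    report = progress_callback or _noop
    report(f"🔍 Reading topics from: {csv_path}")  # illustrative message
    # ... body unchanged: every
    #     if progress_callback:
    #         progress_callback(msg)
    # pair becomes a single report(msg) call.
```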