Update app.py
app.py CHANGED
@@ -27,9 +27,16 @@ from GoogleNews import GoogleNews
 from bs4 import BeautifulSoup
 import requests
 from urllib.parse import urlparse, urlunparse
-from
+from langchain.embeddings import OpenAIEmbeddings
+from stock_vector_db import *
 from datetime import datetime
 
+
+# Set your HF dataset repo and token
+HF_REPO_ID = "rajat5ranjan/stock-insights"
+HF_TOKEN = st.secrets["hf_token"] # Store your HF token safely in Streamlit secrets
+
+
 st.set_page_config(layout="wide")
 
 GOOGLE_API_KEY=os.environ['GOOGLE_API_KEY']
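The new configuration block reads the Hugging Face token from Streamlit secrets, which keeps it out of the repository. Below is a minimal sketch of a slightly more defensive lookup; the secrets.toml line in the comment follows the standard Streamlit convention, and the environment-variable fallback is an assumption for local runs, not something this commit does:

import os
import streamlit as st

# On the Space, .streamlit/secrets.toml would contain a line like:
#   hf_token = "hf_xxxxxxxxxxxxxxxx"

# Prefer Streamlit secrets, fall back to an env var for local runs.
# The fallback is an assumption; the commit reads st.secrets["hf_token"] directly.
HF_TOKEN = st.secrets.get("hf_token", os.environ.get("HF_TOKEN"))
if not HF_TOKEN:
    st.error("No Hugging Face token found; add hf_token to the Streamlit secrets.")
    st.stop()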
@@ -456,23 +463,27 @@ if activities == "Symbol Analysis":
     st.text(res["output_text"])
 elif activities=="News Sentiment":
 
-
-
+
+
+    # Initialize embedding model and vector DB once per session (cache if needed)
+    embedding_model = OpenAIEmbeddings()
+    vector_db = HFVectorDB(hf_repo_id=HF_REPO_ID, hf_token=HF_TOKEN, embedding_model=embedding_model)
+
 
-    log_df = pd.DataFrame(db.log_data)
-    if log_df.empty:
-        st.write("No log")
-    else:
-        log_df["date"] = pd.to_datetime(log_df["date"])
+    # log_df = pd.DataFrame(db.log_data)
+    # if log_df.empty:
+    #     st.write("No log")
+    # else:
+    #     log_df["date"] = pd.to_datetime(log_df["date"])
 
-    with st.sidebar.expander("History Filters"):
-        selected_date = st.date_input("Pick a date", value=datetime.now().date())
-        filtered = log_df[log_df["date"] == pd.to_datetime(selected_date)]
+    # with st.sidebar.expander("History Filters"):
+    #     selected_date = st.date_input("Pick a date", value=datetime.now().date())
+    #     filtered = log_df[log_df["date"] == pd.to_datetime(selected_date)]
 
-        st.write(f"Found {len(filtered)} entries on {selected_date}")
-        for _, row in filtered.iterrows():
-            st.markdown(f"**{row['company']} ({row['ticker']})** → {row['sentiment']} → {row['action']}")
-            st.caption(f"Reason: {row['reason']}")
+    # st.write(f"Found {len(filtered)} entries on {selected_date}")
+    # for _, row in filtered.iterrows():
+    #     st.markdown(f"**{row['company']} ({row['ticker']})** → {row['sentiment']} → {row['action']}")
+    #     st.caption(f"Reason: {row['reason']}")
 
 
 
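HFVectorDB is pulled in by the star import from stock_vector_db, whose implementation is not part of this diff. Judging from how it is constructed and used (an embedding model plus a dataset repo id and token), it presumably wraps a FAISS index persisted to the rajat5ranjan/stock-insights dataset repo. A hypothetical sketch, with every method body an assumption:

# Hypothetical sketch of HFVectorDB; the real class lives in
# stock_vector_db and is not shown in this commit.
from langchain.vectorstores import FAISS
from huggingface_hub import snapshot_download, upload_folder

class HFVectorDB:
    def __init__(self, hf_repo_id, hf_token, embedding_model, local_dir="faiss_index"):
        self.repo_id = hf_repo_id
        self.token = hf_token
        self.embedding = embedding_model
        self.local_dir = local_dir
        self.index = None

    def load(self):
        # Pull a previously uploaded index from the dataset repo, if one exists.
        path = snapshot_download(repo_id=self.repo_id, repo_type="dataset",
                                 token=self.token)
        self.index = FAISS.load_local(path, self.embedding)

    def add_documents(self, docs):
        # Build or extend the FAISS index, then persist it locally and
        # upload the folder back to the dataset repo.
        if self.index is None:
            self.index = FAISS.from_documents(docs, self.embedding)
        else:
            self.index.add_documents(docs)
        self.index.save_local(self.local_dir)
        upload_folder(repo_id=self.repo_id, folder_path=self.local_dir,
                      repo_type="dataset", token=self.token)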
@@ -559,6 +570,16 @@ elif activities=="News Sentiment":
     # Save LLM output to FAISS DB
     today = datetime.now()
     db.store_top_picks(top_picks, today)
+
+    # Add docs to vector DB and save/upload index
+    vector_db.add_documents(docs)
+
+
+    # Save top picks json for backtesting
+    save_top_picks_json(top_picks, today, path="top_picks.jsonl")
+
+    # Optionally add top picks as documents to vector DB
+    add_top_picks_to_vector_db(vector_db, top_picks, today)
 
     # Layout
     for stock in top_picks:
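save_top_picks_json is another stock_vector_db helper that the diff calls but does not define. A plausible sketch, assuming top_picks is a list of JSON-serialisable dicts; the exact record shape is an assumption:

import json

def save_top_picks_json(top_picks, today, path="top_picks.jsonl"):
    # Append one record per run so the JSONL file accumulates a
    # dated log that the backtesting step can replay later.
    with open(path, "a") as f:
        f.write(json.dumps({"date": today.isoformat(), "picks": top_picks},
                           default=str) + "\n")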
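Likewise, add_top_picks_to_vector_db only appears here as a call site. One way it could work is to wrap each pick in a LangChain Document with the run date as metadata and push it through the same add_documents path; all of this is an assumption:

from langchain.docstore.document import Document

def add_top_picks_to_vector_db(vector_db, top_picks, today):
    # Hypothetical helper: store each pick as its own document so the
    # retrieval side can later filter picks by run date via metadata.
    docs = [
        Document(page_content=str(pick),
                 metadata={"type": "top_pick", "date": today.isoformat()})
        for pick in top_picks
    ]
    vector_db.add_documents(docs)

Keeping the date in the metadata is what would let a backtest compare a given day's picks against subsequent price moves.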