import streamlit as st
from langchain_groq import ChatGroq
import yfinance as yf

# Streamlit chat app: answers "invest" questions with yfinance data and a
# naive PE-based recommendation, and falls back to a Groq-hosted LLM for
# everything else. Streamlit reruns this whole script on every user event;
# st.session_state carries the chat history across reruns.

# Initialize the ChatGroq model using the secret API key.
llm = ChatGroq(model_name="Llama3-8b-8192", api_key=st.secrets['groq_api_key'])

# Initialize chat history in session state (first run only).
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": "Hello! How can I assist you with stock information today?",
        }
    ]

# Replay chat messages from history on each rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Accept user input.
if prompt := st.chat_input("Ask me about stocks..."):
    # Display the user message and record it in history.
    with st.chat_message("user"):
        st.write(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Fetch stock data or generate a response based on the user input.
    # BUG FIX: the original also tested `"should I invest" in prompt.lower()`,
    # which could never match (the needle contains an uppercase "I" while the
    # haystack is lowercased); the plain "invest" test subsumes it anyway.
    if "invest" in prompt.lower():
        # Assume the last word is the ticker symbol or company name.
        # NOTE(review): fragile — breaks on e.g. "Should I invest in AAPL today?".
        company_name = prompt.split()[-1]
        try:
            stock_data = yf.Ticker(company_name).info
            # Check if stock_data contains valid information.
            if 'currentPrice' in stock_data:
                response = f"Here is the data for {company_name}:\n"
                response += f"Current Price: {stock_data.get('currentPrice', 'N/A')}\n"
                response += f"Market Cap: {stock_data.get('marketCap', 'N/A')}\n"
                response += f"PE Ratio: {stock_data.get('trailingPE', 'N/A')}\n"
                response += f"Dividend Yield: {stock_data.get('dividendYield', 'N/A')}\n"
                # Simple investment recommendation logic (this can be improved).
                # BUG FIX: the original defaulted a missing PE to 0, which
                # recommended investing precisely when no PE was available —
                # and a None PE would raise TypeError on `< 20`. Treat a
                # missing/None PE as "no buy signal" instead.
                trailing_pe = stock_data.get('trailingPE')
                if trailing_pe is not None and trailing_pe < 20:
                    response += "\n**Recommendation:** Yes, consider investing!"
                else:
                    response += "\n**Recommendation:** No, it might not be a good time to invest."
            else:
                response = f"Sorry, I couldn't find data for {company_name}. Please check the ticker symbol."
        except Exception as e:
            response = f"An error occurred while fetching data: {str(e)}"
    else:
        try:
            # BUG FIX: ChatGroq.invoke returns an AIMessage, not a str; take
            # .content so the .replace() below works and the history stores
            # plain text instead of a message object.
            response = llm.invoke(prompt).content
        except Exception as e:
            response = f"An error occurred while processing your request: {str(e)}"

    # Display assistant response with line breaks for readability.
    # BUG FIX: st.write does not accept unsafe_allow_html — st.markdown does.
    # The "\n" replacement string was garbled in the source; given the
    # unsafe_allow_html flag it was evidently an HTML "<br>" line break.
    with st.chat_message("assistant"):
        st.markdown(response.replace("\n", "<br>"), unsafe_allow_html=True)
    # Add assistant response to chat history.
    st.session_state.messages.append({"role": "assistant", "content": response})