File size: 5,138 Bytes
7bda6cd b6834df 7bda6cd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 |
# Import necessary libraries to use their functions
import os
import streamlit as st
from openai import OpenAI
# Helper for loading environment variables from a .env file
from dotenv import load_dotenv
# Load environment variables from .env file
# Load variables from a local .env file into the process environment
# so os.getenv below can see them.
load_dotenv()

# Read the NVIDIA API key once.  (The original fetched it twice, via
# os.getenv and then os.environ.get -- those are equivalent lookups, so
# the second call was redundant and simply overwrote the first.)
api_key_nvidia = os.getenv("api_key_nvidia")

# Without a key the app cannot call the NVIDIA endpoint: show a clear
# error in the UI and stop script execution here.
if not api_key_nvidia:
    st.error("NVIDIA API key not found. Please set the `api_key_nvidia` environment variable.")
    st.stop()
# Finance-themed CSS overrides for the Streamlit widgets.  Hoisted into
# a named constant so the injection call itself stays readable.
_FINANCE_THEME_CSS = """
<style>
/* Set background color for the main section */
.main {
background-color: #f4f9f9; /* Light teal for a professional look */
color: #000000; /* Black text for readability */
}
/* Set background color for the sidebar */
.sidebar .sidebar-content {
background-color: #d1e7dd; /* Slightly darker teal */
}
/* Set text color for input fields */
.stTextInput textarea {
color: #000000 !important;
}
/* Change styles for dropdown menu */
.stSelectbox div[data-baseweb="select"] {
color: black !important;
background-color: #d1e7dd !important;
}
/* Change color of dropdown icons */
.stSelectbox svg {
fill: black !important;
}
/* Change background and text color for dropdown options */
.stSelectbox option {
background-color: #d1e7dd !important;
color: black !important;
}
/* Change background and text color for dropdown items */
div[role="listbox"] div {
background-color: #d1e7dd !important;
color: black !important;
}
</style>
"""

# Inject the theme; unsafe_allow_html is required for raw <style> tags.
st.markdown(_FINANCE_THEME_CSS, unsafe_allow_html=True)
# Page header: application title and tagline.
st.title("π° Financial Assistant")
st.caption("π Your AI-Powered Financial Advisor")

# Sidebar: capability overview plus attribution footer.
with st.sidebar:
    st.divider()
    st.markdown("### Assistant Capabilities")
    st.markdown("""
- π Investment Analysis
- π³ Budgeting Advice
- π¦ Loan Guidance
- π‘ Retirement Planning
""")
    st.divider()
    st.markdown("Built with NVIDIA API | LangChain ")
    st.markdown("Created with β€ by Nikhil Kumar")
# OpenAI-compatible client pointed at NVIDIA's hosted inference API.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=api_key_nvidia,
)

# System instruction prepended to every request sent to the model.
system_prompt_template = (
    "I am an expert AI financial assistant. Provide accurate, concise, and empathetic responses "
    "to user queries related to investments, budgeting, loans, retirement planning, and other financial matters. "
    "Always respond in English."
)

# Seed the persistent chat history with a greeting the first time the
# script runs; setdefault leaves an existing history untouched.
st.session_state.setdefault("message_log", [
    {"role": "assistant", "content": "Hi! I'm your Finance Assistant. How can I assist you today? π°"}
])
# Render the running conversation inside a dedicated container so the
# transcript stays grouped above the input box.
chat_container = st.container()
with chat_container:
    for entry in st.session_state.message_log:
        with st.chat_message(entry["role"]):
            st.markdown(entry["content"])

# Chat box at the bottom of the page for the user's next question.
user_query = st.chat_input("Type your finance-related question here...")
def generate_ai_response(messages):
    """Send the conversation to the model and return the full reply.

    Parameters
    ----------
    messages : list[dict]
        Chat messages in OpenAI format, each ``{"role": ..., "content": ...}``.

    Returns
    -------
    str
        The assistant's complete reply, assembled from the streamed chunks.
    """
    completion = client.chat.completions.create(
        model="deepseek-ai/deepseek-r1",
        messages=messages,
        temperature=0.5,  # controls randomness of responses
        top_p=0.5,        # nucleus-sampling cutoff, limits diversity
        max_tokens=1000,  # cap on response length
        stream=True,      # receive the reply incrementally
    )
    # Collect streamed deltas in a list and join once at the end,
    # instead of quadratic repeated string concatenation.
    parts = []
    for chunk in completion:
        # Streamed frames may arrive with an empty ``choices`` list
        # (e.g. trailing metadata frames); indexing them would raise
        # IndexError, so skip such chunks.
        if not chunk.choices:
            continue
        parts.append(chunk.choices[0].delta.content or "")
    return "".join(parts)
# React to a newly submitted question.
if user_query:
    # Record the user's message in the persistent history first so it
    # is included in the payload below and rendered on the next rerun.
    st.session_state.message_log.append({"role": "user", "content": user_query})

    # Build the request payload: system instruction first, then the
    # full history, normalizing any legacy "ai" role to the API's
    # expected "assistant".
    messages = [{"role": "system", "content": system_prompt_template}]
    messages.extend(
        {
            "role": "assistant" if msg["role"] == "ai" else msg["role"],
            "content": msg["content"],
        }
        for msg in st.session_state.message_log
    )

    # Query the model while showing a progress spinner.
    with st.spinner("π§ Processing..."):
        ai_response = generate_ai_response(messages)

    # Persist the reply, then rerun the script so the transcript above
    # picks up both new messages.
    st.session_state.message_log.append({"role": "assistant", "content": ai_response})
    st.rerun()