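"""Streamlit front end for the Vistula University AI Assistant.

Loads a Q&A vector store (qa_loader), answers questions through a RAG chain
(rag_chain), and renders a simple chat interface with session-based history.
"""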
import os
import time

import streamlit as st
from dotenv import load_dotenv

from qa_loader import load_qa_and_create_vectorstore
from rag_chain import generate_response

# Load environment variables from a local .env file
load_dotenv()

# Streamlit page configuration
st.set_page_config(page_title="Vistula University AI Assistant", layout="centered")

# Title and description
st.title("Vistula University AI Assistant")
st.write("Ask me anything about Vistula University!")

# Build the retriever once and cache it across reruns
@st.cache_resource
def get_retriever():
    return load_qa_and_create_vectorstore()

retriever = get_retriever()
# Unwrap in case the loader returns a (retriever, ...) tuple
if isinstance(retriever, tuple):
    retriever = retriever[0]

# Start or load chat history
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Display chat history
st.write("### Chat History")
for entry in st.session_state.chat_history:
    with st.chat_message("user"):
        st.write(entry["question"])
    with st.chat_message("assistant"):
        st.write(entry["answer"])

# User input
query = st.chat_input("Ask your question about Vistula University!")

# Process when the user submits a question
if query:
    with st.spinner("Thinking..."):
        response = generate_response(retriever, query)

    # Add to chat history
    st.session_state.chat_history.append({
        "question": query,
        "answer": response,
    })

    # Display the user question and the AI response
    with st.chat_message("user"):
        st.write(query)
    with st.chat_message("assistant"):
        placeholder = st.empty()
        current_text = ""
        # Typing effect: reveal the answer word by word
        for word in response.split():
            current_text += word + " "
            placeholder.write(current_text)
            time.sleep(0.05)
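
# Note: the "typing" loop above is purely cosmetic. generate_response() returns
# the complete answer before the loop starts, so no token-level streaming from
# the model happens here.
# Assuming this file is saved as app.py (the filename is not shown here), the
# app would typically be started with `streamlit run app.py`, with any required
# credentials placed in the .env file read by load_dotenv().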