# mavinsao's picture
# Update app.py
# cae2161 verified
# raw
# history blame
# 5.09 kB
import os
import streamlit as st
import random
import time
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
# Set up the OpenAI API key.
# BUG FIX: the original re-assigned os.environ.get("OPENAI_API_KEY") back into
# os.environ — a no-op when the key exists, and a cryptic TypeError
# (os.environ values must be str) when it does not. Fail fast with a clear
# message instead so a missing key is obvious at startup.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
# Load the FAISS index.
# The embedding model must match the one used when the index was built, or
# similarity search results are meaningless.
embeddings = OpenAIEmbeddings()
# allow_dangerous_deserialization=True is required because the index metadata
# is pickled on disk. NOTE(review): this is only safe if "faiss_index" is a
# trusted, locally produced artifact — never load a user-supplied index.
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
# Create a retriever from the loaded vector store.
# k=5: each query retrieves the five most similar course documents.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
# Define a prompt template for course recommendations.
# BUG FIX: input_variables declared "context" but the template text never
# contained {context}, so the documents fetched by the retriever were never
# inserted into the prompt (and strict template validation rejects the
# mismatch). A "Retrieved Course Information" section now injects them.
prompt_template = """
You are an AI course recommendation system. Your task is to assist users by engaging them in conversation and providing course recommendations only when they explicitly ask for it.
If the user does not indicate interest in receiving course recommendations, focus on engaging them in conversation and understanding their interests.
Consider the summarized chat history to provide more relevant and personalized recommendations when requested.
Retrieved Course Information:
{context}
Summarized Chat History:
{chat_history}
User's Current Query: {question}
If the user has asked for course recommendations, provide a list of relevant courses based on their interests and goals, emphasizing how the courses match the learning outcomes and syllabus content.
Your response should include:
1. A detailed explanation of how the recommended courses align with the user's interests and previous queries, focusing on the "What You Will Learn" section and the syllabus content.
2. A summary of each recommended course, highlighting:
- The specific skills and knowledge the user will gain (from "What You Will Learn")
- Key topics covered in the syllabus
- Course level and language
- The institution offering the course
3. Mention the course ratings if available.
4. Any additional advice or suggestions for the user's learning journey, based on the syllabus progression and their conversation history.
5. Provide the course URLs for easy access.
If the user has not explicitly requested course recommendations, respond in a conversational manner, encouraging them to share more about their interests or asking follow-up questions to better understand their needs.
Recommendation:
"""
# Prompt used by the chain's combine-documents step; all three variables are
# now actually referenced in the template above.
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)
# Initialize the language model.
# temperature=0.5 balances consistent recommendations with some variety.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")
# Set up conversation memory with summarization.
# Older turns are summarized once the buffer exceeds ~1000 tokens so long
# chats stay within the model's context window; memory_key="chat_history"
# matches the {chat_history} placeholder in PROMPT.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)
# Create the conversational retrieval chain: condenses the follow-up
# question, retrieves documents via `retriever`, and answers with `llm`
# using the custom PROMPT for the combine-documents step.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": PROMPT}
)
# Streamlit app: browser-tab title/icon and on-page heading.
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")
# Custom CSS for styling.
# NOTE(review): the classes below (.chat-container, .user-message,
# .assistant-message) are defined but nothing in this file assigns them to an
# element — st.chat_message renders with Streamlit's own markup. Confirm
# whether this CSS is vestigial or consumed elsewhere.
st.markdown("""
<style>
.chat-container {
max-width: 800px;
margin: auto;
padding: 20px;
border-radius: 10px;
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
background-color: #f9f9f9;
}
.user-message {
background-color: #d1e7dd;
border-radius: 10px;
padding: 10px;
margin: 5px 0;
text-align: left;
}
.assistant-message {
background-color: #e2e3e5;
border-radius: 10px;
padding: 10px;
margin: 5px 0;
text-align: left;
}
</style>
""", unsafe_allow_html=True)
# Seed the chat transcript in session state on first run; Streamlit re-executes
# the whole script on every interaction, so this guard keeps history alive.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay the stored transcript so the conversation survives each rerun.
for past_turn in st.session_state["messages"]:
    with st.chat_message(past_turn["role"]):
        st.markdown(past_turn["content"])
# Accept user input and produce the assistant's reply.
if prompt := st.chat_input("What are you looking to learn?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Assistant response with a simulated streaming effect.
    with st.chat_message("assistant"):
        response = qa_chain({"question": prompt})
        response_text = response["answer"]
        # BUG FIX: the original called st.markdown() once per word, which
        # stacks every word as a separate element instead of one growing
        # message. Stream the accumulating text into a single placeholder.
        placeholder = st.empty()
        streamed = ""
        for word in response_text.split():
            streamed += word + " "
            placeholder.markdown(streamed)
            time.sleep(0.05)  # delay for typing effect
        # Final render restores the model's original whitespace/newlines,
        # which .split() discards during the streaming animation.
        placeholder.markdown(response_text)
    # Persist the assistant reply so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": response_text})
# Optional: button to clear the displayed chat history.
# NOTE(review): only the transcript is cleared; the chain's summary memory
# object (`memory`) is module-level and is NOT reset here — confirm whether
# memory.clear() is also desired.
if st.button("Clear Chat History"):
    st.session_state.messages.clear()
    # st.experimental_rerun was deprecated and removed in modern Streamlit;
    # prefer st.rerun when available, falling back for older versions.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()