import os
import streamlit as st
import random
import time
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
# Ensure the OpenAI API key is present before any OpenAI client is constructed.
# BUG FIX: the original `os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY")`
# was a no-op when the variable was set, and raised a confusing
# `TypeError: str expected, not NoneType` when it was missing. Fail fast with a
# clear message instead.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")

# Load the FAISS index built offline into ./faiss_index.
# allow_dangerous_deserialization is required by newer langchain versions for
# pickle-backed indexes; acceptable only because the index is produced locally,
# never downloaded from an untrusted source.
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# Retriever returning the top-5 most similar course documents per query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
# Prompt for the combine-docs ("stuff") stage of ConversationalRetrievalChain.
# BUG FIX: the original declared "context" in input_variables but the template
# contained no {context} placeholder, so the course documents retrieved from
# FAISS were never inserted into the prompt -- recommendations could not be
# grounded in the index (and strict PromptTemplate validation rejects the
# mismatch in some langchain versions).
prompt_template = """
You are an AI course recommendation system. Your task is to assist users by engaging them in conversation and providing course recommendations only when they explicitly ask for it.
If the user does not indicate interest in receiving course recommendations, focus on engaging them in conversation and understanding their interests.
Consider the summarized chat history to provide more relevant and personalized recommendations when requested.
Retrieved Course Information:
{context}
Summarized Chat History:
{chat_history}
User's Current Query: {question}
If the user has asked for course recommendations, provide a list of relevant courses based on their interests and goals, emphasizing how the courses match the learning outcomes and syllabus content.
Your response should include:
1. A detailed explanation of how the recommended courses align with the user's interests and previous queries, focusing on the "What You Will Learn" section and the syllabus content.
2. A summary of each recommended course, highlighting:
- The specific skills and knowledge the user will gain (from "What You Will Learn")
- Key topics covered in the syllabus
- Course level and language
- The institution offering the course
3. Mention the course ratings if available.
4. Any additional advice or suggestions for the user's learning journey, based on the syllabus progression and their conversation history.
5. Provide the course URLs for easy access.
If the user has not explicitly requested course recommendations, respond in a conversational manner, encouraging them to share more about their interests or asking follow-up questions to better understand their needs.
Recommendation:
"""
# All three variables are supplied by the chain at run time: `context` by the
# retriever's documents, `chat_history` by the memory, `question` by the user.
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)
# Initialize the language model
# temperature=0.5 trades a little determinism for conversational variety.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")
# Set up conversation memory with summarization
# ConversationSummaryBufferMemory keeps recent turns verbatim and asks `llm`
# to summarize older ones once the buffer exceeds max_token_limit tokens; the
# summary+buffer is exposed to the prompt under the "chat_history" key.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)
# Create the conversational retrieval chain
# Pipeline per call: condense question with history -> retrieve top-k docs via
# `retriever` -> answer with `llm` using the custom combine-docs PROMPT.
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
memory=memory,
combine_docs_chain_kwargs={"prompt": PROMPT}
)
# Streamlit app
# Page chrome: browser-tab title and icon must be set before other st.* calls.
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")
# Custom CSS for styling
# NOTE(review): these classes (.chat-container, .user-message,
# .assistant-message) are not referenced by any markup visible in this file --
# st.chat_message renders its own elements -- so this styling may be dead;
# confirm before relying on it.
st.markdown("""
<style>
.chat-container {
max-width: 800px;
margin: auto;
padding: 20px;
border-radius: 10px;
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
background-color: #f9f9f9;
}
.user-message {
background-color: #d1e7dd;
border-radius: 10px;
padding: 10px;
margin: 5px 0;
text-align: left;
}
.assistant-message {
background-color: #e2e3e5;
border-radius: 10px;
padding: 10px;
margin: 5px 0;
text-align: left;
}
</style>
""", unsafe_allow_html=True)
# Conversation transcript lives in session state so it survives Streamlit's
# top-to-bottom rerun on every user interaction.
st.session_state.setdefault("messages", [])

# Streamlit redraws the page from scratch each rerun, so replay the whole
# stored transcript before handling any new input.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
# Handle one user turn: record it, run the retrieval chain, render the reply.
if prompt := st.chat_input("What are you looking to learn?"):
    # Persist and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and display the assistant's reply.
    with st.chat_message("assistant"):
        # The chain reads history from its memory; only the new question is passed.
        response = qa_chain({"question": prompt})
        response_text = response["answer"]

        # Simulated word-by-word streaming.
        # BUG FIX: the original called st.markdown() once per word, which
        # creates a NEW element per word and stacks them vertically instead of
        # streaming. Write the growing text into a single st.empty()
        # placeholder so the message updates in place.
        placeholder = st.empty()
        streamed = ""
        for word in response_text.split():
            streamed += word + " "
            placeholder.markdown(streamed)
            time.sleep(0.05)  # small delay for the typing effect
        # Final render of the exact answer (restores original whitespace).
        placeholder.markdown(response_text)

    # Persist the assistant's reply so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": response_text})
# Optional: clear the conversation and start over.
if st.button("Clear Chat History"):
    st.session_state.messages.clear()
    # BUG FIX: the displayed transcript and the chain's summary memory must be
    # cleared together -- the original left `memory` intact, so the bot still
    # "remembered" a cleared chat.
    memory.clear()
    # st.experimental_rerun() is deprecated and removed in Streamlit >= 1.37;
    # st.rerun() (available since 1.27) is the supported replacement.
    st.rerun()