|
import os |
|
import streamlit as st |
|
import random |
|
import time |
|
from langchain.chat_models import ChatOpenAI |
|
from langchain.chains import ConversationalRetrievalChain |
|
from langchain.prompts import PromptTemplate |
|
from langchain.memory import ConversationSummaryBufferMemory |
|
from langchain.vectorstores import FAISS |
|
from langchain.embeddings import OpenAIEmbeddings |
|
|
|
|
|
# Fail fast with an actionable message if the API key is missing.
# FIX: the original line re-assigned os.environ.get("OPENAI_API_KEY") back
# into os.environ -- a no-op when the key exists, and a confusing
# "TypeError: str expected, not NoneType" when it does not.
if not os.environ.get("OPENAI_API_KEY"):
    raise EnvironmentError(
        "OPENAI_API_KEY is not set; export it before launching the app."
    )

# Embed queries with OpenAI embeddings and search a locally persisted FAISS index.
embeddings = OpenAIEmbeddings()

# allow_dangerous_deserialization opts into pickle-based loading; acceptable
# only because "faiss_index" is an index we built ourselves -- never enable
# this for an index obtained from an untrusted source.
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# Return the 5 most similar course documents per query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
|
|
|
|
|
# Prompt for the chain's "combine documents" step.
# FIX: the original template never referenced {context}, so the course
# documents fetched by the retriever were silently dropped and answers came
# from the model's parametric knowledge alone.  (It also declared "context"
# in input_variables without using it, which strict PromptTemplate
# validation rejects.)  A "Retrieved Course Information" section now feeds
# the retrieved documents to the LLM.
prompt_template = """
You are an AI course recommendation system. Your task is to assist users by engaging them in conversation and providing course recommendations only when they explicitly ask for it.

If the user does not indicate interest in receiving course recommendations, focus on engaging them in conversation and understanding their interests.

Consider the summarized chat history to provide more relevant and personalized recommendations when requested.

Summarized Chat History:
{chat_history}

User's Current Query: {question}

Retrieved Course Information:
{context}

If the user has asked for course recommendations, provide a list of relevant courses based on their interests and goals, emphasizing how the courses match the learning outcomes and syllabus content.

Your response should include:
1. A detailed explanation of how the recommended courses align with the user's interests and previous queries, focusing on the "What You Will Learn" section and the syllabus content.
2. A summary of each recommended course, highlighting:
- The specific skills and knowledge the user will gain (from "What You Will Learn")
- Key topics covered in the syllabus
- Course level and language
- The institution offering the course
3. Mention the course ratings if available.
4. Any additional advice or suggestions for the user's learning journey, based on the syllabus progression and their conversation history.
5. Provide the course URLs for easy access.

If the user has not explicitly requested course recommendations, respond in a conversational manner, encouraging them to share more about their interests or asking follow-up questions to better understand their needs.

Recommendation:
"""

# All three declared variables now appear in the template text above.
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)
|
|
|
|
|
# gpt-4-turbo at moderate temperature: varied enough for conversation,
# stable enough for factual course summaries.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")

# Rolling conversation memory: recent turns are kept verbatim and older ones
# are summarised (by the same llm) once the buffer passes ~1000 tokens.
# Exposed to the prompt under the "chat_history" key.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)

# Retrieval-augmented conversational chain: combines the incoming question
# with the stored chat history, retrieves matching course documents, and
# answers using PROMPT in the combine-documents step.
qa_chain = ConversationalRetrievalChain.from_llm(

    llm=llm,

    retriever=retriever,

    memory=memory,

    combine_docs_chain_kwargs={"prompt": PROMPT}

)
|
|
|
|
|
# Browser tab title/icon and on-page header.
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")

st.title("AI Course Recommendation Chatbot")


# Inject custom CSS for the chat UI.
# NOTE(review): the classes defined here (.chat-container, .user-message,
# .assistant-message) are never attached to any HTML emitted elsewhere in
# this file -- messages are rendered via st.chat_message/st.markdown -- so
# this stylesheet appears to have no visible effect; confirm or wire the
# classes up.
st.markdown("""

<style>

.chat-container {

max-width: 800px;

margin: auto;

padding: 20px;

border-radius: 10px;

box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);

background-color: #f9f9f9;

}

.user-message {

background-color: #d1e7dd;

border-radius: 10px;

padding: 10px;

margin: 5px 0;

text-align: left;

}

.assistant-message {

background-color: #e2e3e5;

border-radius: 10px;

padding: 10px;

margin: 5px 0;

text-align: left;

}

</style>

""", unsafe_allow_html=True)
|
|
|
|
|
# Create the per-session transcript on first load.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Streamlit re-executes the script top-to-bottom on every interaction, so
# replay the saved transcript to keep the conversation on screen.
for past_turn in st.session_state.messages:
    role, text = past_turn["role"], past_turn["content"]
    with st.chat_message(role):
        st.markdown(text)
|
|
|
|
|
if prompt := st.chat_input("What are you looking to learn?"):

    # Persist and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # The chain pulls the running summary from its own memory; only the
        # new question needs to be supplied.  The final text is in "answer".
        response = qa_chain({"question": prompt})
        response_text = response["answer"]

        # Typewriter effect.
        # FIX: the original called st.markdown(word) once per word, which
        # renders every word as a separate markdown element instead of
        # animating a single message; rewrite one st.empty() placeholder
        # with the growing text instead.
        # Also dropped unsafe_allow_html=True here: the text comes from the
        # LLM (indirectly user-influenced), so allowing raw HTML was an
        # injection risk.
        placeholder = st.empty()
        streamed = ""
        for word in response_text.split():
            streamed += word + " "
            placeholder.markdown(streamed)
            time.sleep(0.05)
        # Final render uses the exact answer (restores any whitespace and
        # line breaks that .split() discarded during the animation).
        placeholder.markdown(response_text)

    st.session_state.messages.append({"role": "assistant", "content": response_text})
|
|
|
|
|
# Reset the conversation.
if st.button("Clear Chat History"):
    st.session_state.messages.clear()
    # FIX: also wipe the chain's summary memory; clearing only the UI
    # transcript left the old conversation inside
    # ConversationSummaryBufferMemory, so stale context leaked into "new"
    # chats.
    memory.clear()
    # NOTE(review): st.experimental_rerun() is deprecated (removed in recent
    # Streamlit releases); switch to st.rerun() when upgrading Streamlit.
    st.experimental_rerun()
|
|