|
import os |
|
import streamlit as st |
|
import time |
|
from langchain.chat_models import ChatOpenAI |
|
from langchain.chains import ConversationalRetrievalChain |
|
from langchain.prompts import PromptTemplate |
|
from langchain.memory import ConversationSummaryBufferMemory |
|
from langchain.vectorstores import FAISS |
|
from langchain.embeddings import OpenAIEmbeddings |
|
|
|
|
|
# Fail fast with a clear message when the OpenAI key is missing. The previous
# self-assignment (os.environ[k] = os.environ.get(k)) was a no-op when the key
# existed, and raised a confusing "TypeError: str expected, not NoneType" when
# it did not, because .get() returns None for unset variables.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY environment variable is not set; it is required "
        "for the embeddings and chat-completion calls below."
    )
|
|
|
|
|
# Embedding model used to vectorize user queries against the course index;
# reads OPENAI_API_KEY from the environment.
embeddings = OpenAIEmbeddings()

# Load the pre-built course vector index from the local "faiss_index" folder.
# NOTE(review): allow_dangerous_deserialization=True unpickles index metadata
# from disk — acceptable only because the index is produced locally by a
# trusted build step; never point this at an untrusted file.
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# Retriever that returns the top 5 most similar course documents per query;
# these become the {context} variable in the prompt below.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
|
|
|
|
|
# System prompt for the retrieval-augmented chain. It combines a persona
# ("friendly course recommender") with answer-formatting instructions.
# Placeholders filled in by the chain at runtime:
#   {chat_history} - summarized prior turns (from ConversationSummaryBufferMemory)
#   {question}     - the user's current message
#   {context}      - retrieved course documents (from the FAISS retriever)
prompt_template = """

You are an AI course recommendation system. Your task is to engage in friendly and casual conversation with the user, responding in a warm and approachable manner. If the user initiates a general greeting or casual chat, maintain a conversational tone and avoid mentioning courses unless they explicitly inquire about them. In such cases, gently inquire about their interests in learning or study topics, introducing yourself as an expert in course recommendations.

Summarized Chat History:

{chat_history}

User's Current Query:

{question}

If the user specifically asks about courses or learning opportunities, transition to recommending courses based on their interests and goals. Emphasize matching the learning outcomes and syllabus content for relevant recommendations. Consider the chat history for context.

Relevant Courses:

{context}

When responding to course inquiries, include:

1. A detailed explanation of how the courses align with the user's interests, focusing on "What You Will Learn."

2. A summary of each course, highlighting:

- Skills and knowledge gained

- Key syllabus topics

- Course level and language

- Institution offering the course

3. Course ratings, if available.

4. Additional advice based on their learning journey.

5. Course URLs for easy access.

Be encouraging and supportive, relating your suggestions to user preferences or constraints mentioned in previous messages.

Recommendation:

"""

# Wrap the raw template so LangChain can validate and substitute the three
# placeholders declared above.
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)
|
|
|
|
|
# Chat model shared by the answer generation and the memory summarizer.
# temperature=0.5 keeps responses conversational without drifting far from
# the retrieved course content.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")

# Conversation memory exposed to the prompt as {chat_history}. Per LangChain's
# ConversationSummaryBufferMemory contract, recent turns are kept verbatim and
# older ones are summarized by `llm` once the buffer exceeds ~1000 tokens.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)

# Retrieval-augmented chain: rephrases the question using the chat history,
# fetches matching courses via `retriever`, then answers with PROMPT.
# The memory object is read and updated by the chain on every call.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": PROMPT}
)
|
|
|
|
|
# --- Streamlit page scaffolding ------------------------------------------
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")

# Create the per-session transcript on the first run of this session.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Streamlit re-executes the whole script on every interaction, so replay the
# stored transcript to keep the conversation visible across reruns.
for msg in st.session_state["messages"]:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
|
|
|
|
|
|
|
if prompt := st.chat_input("What are you looking to learn?"):

    # Record the user's turn in the session transcript, then echo it.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        try:
            # The chain manages its own memory, so the only required input is
            # the current question; the answer arrives under the "answer" key.
            # This is a remote LLM + retrieval call and can fail (network,
            # auth, rate limits) — guard it so the app shows an error message
            # instead of crashing the script run with a raw traceback.
            response = qa_chain({"question": prompt})
            response_text = response["answer"]
        except Exception as exc:
            st.error(f"Sorry, something went wrong while generating a recommendation: {exc}")
        else:
            placeholder = st.empty()

            # Simulated typing effect: reveal the answer one character at a
            # time, re-rendering the placeholder on each step.
            # NOTE(review): unsafe_allow_html=True renders raw HTML coming
            # from model output — confirm HTML is actually needed here;
            # otherwise drop the flag to avoid injecting untrusted markup.
            accumulated_response = ""
            for char in response_text:
                accumulated_response += char
                placeholder.markdown(accumulated_response, unsafe_allow_html=True)
                time.sleep(0.01)

            # Persist the assistant turn only after a successful generation,
            # so a failed call is not replayed as an empty/stale message.
            st.session_state.messages.append({"role": "assistant", "content": response_text})