# AI course recommendation chatbot: a Streamlit chat UI over a FAISS-backed
# LangChain ConversationalRetrievalChain with summarizing conversation memory.
import os
import streamlit as st
import time
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
# Set up the OpenAI API key.
# The original `os.environ[...] = os.environ.get(...)` was a no-op when the key
# was present and crashed with an opaque `TypeError: str expected` when it was
# missing (environ values must be str, not None). Fail fast with a clear
# message instead; OpenAIEmbeddings/ChatOpenAI below read the key from the env.
_openai_api_key = os.environ.get("OPENAI_API_KEY")
if not _openai_api_key:
    raise RuntimeError(
        "OPENAI_API_KEY environment variable is not set; "
        "export it before launching this app."
    )
os.environ["OPENAI_API_KEY"] = _openai_api_key
# Load the FAISS index
# OpenAIEmbeddings reads OPENAI_API_KEY from the environment (configured above).
embeddings = OpenAIEmbeddings()
# NOTE(review): allow_dangerous_deserialization=True unpickles the stored index.
# That is only safe because "faiss_index" is expected to be a locally built,
# trusted artifact — never point this at a file from an untrusted source.
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
# Create a retriever from the loaded vector store
# k=5: fetch the five most similar course documents per (condensed) query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
# Define a prompt template for course recommendations
# Template for the chain's combine-documents step. Placeholders:
#   {chat_history} — summarized memory (ConversationSummaryBufferMemory below),
#   {question}     — the user's current query,
#   {context}      — the retrieved course documents.
# The literal text is the model's instruction set — do not edit casually.
prompt_template = """
You are an AI course recommendation system. Your task is to engage in friendly and casual conversation with the user, responding in a warm and approachable manner. If the user initiates a general greeting or casual chat, maintain a conversational tone and avoid mentioning courses unless they explicitly inquire about them. In such cases, gently inquire about their interests in learning or study topics, introducing yourself as an expert in course recommendations.
Summarized Chat History:
{chat_history}
User's Current Query:
{question}
If the user specifically asks about courses or learning opportunities, transition to recommending courses based on their interests and goals. Emphasize matching the learning outcomes and syllabus content for relevant recommendations. Consider the chat history for context.
Relevant Courses:
{context}
When responding to course inquiries, include:
1. A detailed explanation of how the courses align with the user's interests, focusing on "What You Will Learn."
2. A summary of each course, highlighting:
- Skills and knowledge gained
- Key syllabus topics
- Course level and language
- Institution offering the course
3. Course ratings, if available.
4. Additional advice based on their learning journey.
5. Course URLs for easy access.
Be encouraging and supportive, relating your suggestions to user preferences or constraints mentioned in previous messages.
Recommendation:
"""
# input_variables must exactly match the placeholders used in the template.
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=["chat_history", "question", "context"]
)
# Initialize the language model
# temperature=0.5: some conversational variety while staying grounded.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")
# Set up conversation memory with summarization
# Older turns are summarized once the buffer exceeds ~1000 tokens, so long
# chats still fit in the prompt. memory_key="chat_history" must match the
# {chat_history} placeholder in the template above.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)
# Create the conversational retrieval chain
# NOTE(review): this chain presumably condenses the question with chat history
# before retrieval (standard ConversationalRetrievalChain behavior) — confirm
# against the pinned langchain version.
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
memory=memory,
combine_docs_chain_kwargs={"prompt": PROMPT}
)
# --- Streamlit page setup ---
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")

# The transcript lives in session state so it survives Streamlit's reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay every stored turn on each rerun so the chat history stays visible.
for entry in st.session_state.messages:
    role, content = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(content)
# Accept user input; chat_input returns None until the user submits a message.
if prompt := st.chat_input("What are you looking to learn?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate the assistant's answer with a simulated streaming effect.
    with st.chat_message("assistant"):
        response = qa_chain({"question": prompt})
        response_text = response["answer"]

        placeholder = st.empty()
        shown = ""
        # Stream word-by-word instead of character-by-character: re-rendering
        # the whole markdown per character is quadratic in answer length and
        # at 0.01 s/char a 2,000-char reply took ~20 s to appear.
        for word in response_text.split(" "):
            shown += word + " "
            placeholder.markdown(shown + "▌")  # cursor marks in-progress text
            time.sleep(0.02)
        # Final render without the cursor. unsafe_allow_html was removed:
        # rendering raw HTML produced by the model is an injection risk.
        placeholder.markdown(response_text)

    # Persist the assistant's turn so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": response_text})