File size: 5,332 Bytes
7a9c79b 5046fc6 7a9c79b b095e6b 7a9c79b 2614653 7a9c79b 2614653 7a9c79b 2614653 7a9c79b cae2161 7a9c79b 2250dac 7a9c79b 2250dac 5046fc6 2250dac 5046fc6 b095e6b 5046fc6 cae2161 5046fc6 b095e6b 7a9c79b 5046fc6 7a9c79b 5046fc6 2614653 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 |
import os
import streamlit as st
import random
import time
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
import re
# Function to split text into sentences
def split_into_sentences(text):
    """Split *text* into sentences at whitespace following '.', '!' or '?'.

    Generalized from the original, which split only on literal spaces
    (`' +'`): using `\\s+` also handles sentences separated by newlines or
    tabs, while behaving identically for space-separated input. The
    terminating punctuation is kept with its sentence via the lookbehind.
    """
    return re.split(r'(?<=[.!?])\s+', text)
# Fail fast if the OpenAI API key is missing. The previous
# `os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY")` was a
# no-op when the key existed and raised TypeError when it did not, because
# os.environ rejects None values; this check gives a clear error instead.
if not os.environ.get("OPENAI_API_KEY"):
    raise EnvironmentError("OPENAI_API_KEY environment variable is not set")

# Load the persisted FAISS index from disk. Deserialization is opted into
# explicitly because FAISS index metadata is pickled — only safe for an
# index this application produced itself.
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# Retriever returning the 5 nearest course documents for each query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
# Define a prompt template for course recommendations.
# The three placeholders are filled by ConversationalRetrievalChain at run
# time: {chat_history} (summarized memory), {question} (the user's current
# input), and {context} (the retrieved course documents).
prompt_template = """
You are an AI course recommendation system. Your task is to recommend courses based on the user's description of their interests and goals, with a strong emphasis on matching the learning outcomes and syllabus content. Consider the summarized chat history to provide more relevant and personalized recommendations.
Summarized Chat History:
{chat_history}
User's Current Query: {question}
Based on the user's current query and chat history summary, here are some relevant courses from our database:
{context}
Please provide a personalized course recommendation. Your response should include:
1. A detailed explanation of how the recommended courses match the user's interests and previous queries, focusing primarily on the "What You Will Learn" section and the syllabus content.
2. A summary of each recommended course, highlighting:
- The specific skills and knowledge the user will gain (from "What You Will Learn")
- Key topics covered in the syllabus
- Course level and language
- The institution offering the course
3. Mention the course ratings if available.
4. Any additional advice or suggestions for the user's learning journey, based on the syllabus progression and their conversation history.
5. Provide the course URLs for easy access.
Prioritize courses that have the most relevant learning outcomes and syllabus content matching the user's description and previous interactions. If multiple courses are similarly relevant, you may suggest a learning path combining complementary courses.
Remember to be encouraging and supportive in your recommendation, and relate your suggestions to any preferences or constraints the user has mentioned in previous messages.
Recommendation:
"""
# input_variables must match the placeholder names in the template exactly,
# or LangChain raises a validation error at format time.
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=["chat_history", "question", "context"]
)
# Initialize the language model.
# temperature=0.5 trades strict grounding in the retrieved context for some
# variety in phrasing across turns.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")
# Set up conversation memory with summarization: older turns are compressed
# into a summary once the buffer exceeds max_token_limit, so long chats stay
# within the model's context window. memory_key="chat_history" matches the
# {chat_history} placeholder in PROMPT above.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)
# Create the conversational retrieval chain: each question is (re)phrased,
# sent to the retriever, and the retrieved docs are combined via PROMPT.
# NOTE(review): these `langchain.*` import paths are deprecated in newer
# LangChain releases (moved to langchain_community / langchain_openai) —
# confirm against the pinned LangChain version before upgrading.
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
memory=memory,
combine_docs_chain_kwargs={"prompt": PROMPT}
)
# Streamlit app: page chrome must be configured before any other st.* call
# that renders output (set_page_config raises if called later).
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")
# Custom CSS for styling.
# NOTE(review): the .chat-container / .user-message / .assistant-message
# classes defined below are not attached to any element rendered by this
# script (st.chat_message uses its own internal classes) — they appear to be
# leftovers from an earlier hand-rolled layout; confirm before removing.
st.markdown("""
<style>
.chat-container {
max-width: 800px;
margin: auto;
padding: 20px;
border-radius: 10px;
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
background-color: #f9f9f9;
}
.user-message {
background-color: #d1e7dd;
border-radius: 10px;
padding: 10px;
margin: 5px 0;
text-align: left;
}
.assistant-message {
background-color: #e2e3e5;
border-radius: 10px;
padding: 10px;
margin: 5px 0;
text-align: left;
}
</style>
""", unsafe_allow_html=True)
# Streamlit re-executes this script top-to-bottom on every interaction, so
# the conversation transcript must live in session state. Create the message
# log on the first run only.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay every stored turn so the full transcript stays visible after reruns.
for past_turn in st.session_state.messages:
    role, content = past_turn["role"], past_turn["content"]
    with st.chat_message(role):
        st.markdown(content)
# Accept user input (st.chat_input returns None when nothing was submitted,
# so the walrus guard skips this whole section on plain reruns).
if user_query := st.chat_input("What are you looking to learn?"):
    # Persist and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_query})
    with st.chat_message("user"):
        st.markdown(user_query)

    # Generate the assistant's reply and reveal it sentence by sentence.
    with st.chat_message("assistant"):
        # NOTE(review): calling the chain directly (`qa_chain({...})`) is
        # deprecated in newer LangChain in favor of `.invoke()`; kept as-is
        # to match the installed version — confirm before upgrading.
        response = qa_chain({"question": user_query})
        response_text = response["answer"]

        # Fix: the original issued one st.markdown per sentence, rendering
        # each sentence as a separate element. Accumulate into a single
        # st.empty() placeholder so the reply grows in place like a stream.
        placeholder = st.empty()
        revealed = ""
        for sentence in split_into_sentences(response_text):
            revealed = sentence if not revealed else revealed + " " + sentence
            placeholder.markdown(revealed)
            time.sleep(1)  # Delay between sentences (adjust as needed)

    # Store the complete reply so it survives the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": response_text})
# Optional: Add a button to clear the chat history.
if st.button("Clear Chat History"):
    st.session_state.messages.clear()
    # st.experimental_rerun() was removed in recent Streamlit releases;
    # st.rerun() (stable since 1.27) is the direct replacement and restarts
    # the script immediately so the cleared transcript disappears.
    st.rerun()