import os
import streamlit as st
import time
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
# Ensure the OpenAI API key is available before any OpenAI clients are created
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("The OPENAI_API_KEY environment variable is not set.")
# Load the FAISS index
embeddings = OpenAIEmbeddings()
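# Note: allow_dangerous_deserialization is required because load_local unpickles
# the saved docstore; only enable it for an index you built or otherwise trust.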
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
# Create a retriever from the loaded vector store
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
# Define a prompt template for course recommendations
prompt_template = """
You are an AI-powered course recommendation expert with extensive knowledge of educational programs across various disciplines. Your primary goal is to provide personalized, high-quality course suggestions tailored to each user's unique interests, goals, and background.
Conversation History:
{chat_history}
Current User Query:
{question}
Relevant Courses from Database:
{context}
Instructions for Crafting Your Response:
1. Engagement and Tone:
- Begin with a warm, friendly greeting if this is a new interaction.
- Maintain a professional yet approachable tone throughout the conversation.
- If the user initiates casual chat, engage briefly before steering the conversation towards educational interests.
2. Analysis and Recommendation:
- Carefully analyze the user's query and conversation history to understand their educational needs, interests, and any constraints.
- Select the most relevant courses from the provided context, prioritizing those with learning outcomes and syllabus content that closely match the user's requirements.
3. Detailed Course Recommendations:
For each recommended course, provide:
- Course title and offering institution
- A concise overview of the course content
- Specific skills and knowledge to be gained (from "What You Will Learn")
- Key topics covered in the syllabus
- Course level, duration, and language of instruction
- Course ratings and reviews, if available
- Direct URL to the course page
4. Personalized Explanation:
- Clearly articulate how each recommended course aligns with the user's expressed interests and goals.
- Highlight specific aspects of the course that address the user's needs or previous queries.
5. Learning Path Suggestions:
- If appropriate, suggest a learning path combining complementary courses.
- Explain how the courses build upon each other or cover different aspects of the user's area of interest.
6. Additional Guidance:
- Offer advice on course selection based on the user's background and goals.
- Suggest supplementary resources or preparatory materials if relevant.
- Address any potential challenges or prerequisites the user should consider.
7. Encouragement and Next Steps:
- Provide encouraging words to motivate the user in their learning journey.
- Suggest clear next steps, such as exploring course details or considering enrollment options.
- Invite further questions or clarifications about the recommendations.
8. Adaptability:
- If the user expresses dissatisfaction with initial recommendations, quickly pivot to alternative suggestions.
- Be prepared to refine recommendations based on additional information or feedback from the user.
Remember to prioritize accuracy, relevance, and user-centricity in your recommendations. Your goal is to empower the user to make informed decisions about their educational path.
Recommendation:
"""
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)
# Initialize the language model
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4o-mini")
# Set up conversation memory with summarization
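# ConversationSummaryBufferMemory keeps recent turns verbatim and summarizes
# older turns once the buffer grows past max_token_limit tokens.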
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000, memory_key="chat_history", return_messages=True)
# Create the conversational retrieval chain
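# When prior history exists, the chain condenses the follow-up question and the
# chat history into a standalone question, retrieves the matching course chunks,
# and generates the answer with the prompt defined above.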
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": PROMPT}
)
# Streamlit app
st.set_page_config(page_title="Course Recommendation Chatbot", page_icon=":book:")
st.title("HONEY BEE: Course Recommendation Chatbot 🐝")
# Initialize chat history in session state
if "messages" not in st.session_state:
st.session_state.messages = []
# Add introductory message
welcome_message = (
"Hello! I'm HONEY BEE, your friendly Course Recommendation Chatbot! π "
"I'm here to help you find the best courses based on your interests and goals. "
"Feel free to ask me anything about learning or courses!"
)
st.session_state.messages.append({"role": "assistant", "content": welcome_message})
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("What are you looking to learn?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Assistant response generation with streaming effect
    with st.chat_message("assistant"):
        response = qa_chain({"question": prompt})
        response_text = response["answer"]
        # Create an empty placeholder
        placeholder = st.empty()
        # Initialize an empty string to accumulate the response
        accumulated_response = ""
        # Stream the response character by character
        for char in response_text:
            accumulated_response += char
            placeholder.markdown(accumulated_response, unsafe_allow_html=True)
            time.sleep(0.01)  # Add a small delay to create a typing effect
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response_text})
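# Note: this app assumes a FAISS index has already been built and saved to the
# "faiss_index" directory. A minimal sketch of how such an index could be created
# (run once in a separate script; course_docs is a hypothetical list of LangChain
# Document objects describing courses):
#
#     from langchain.vectorstores import FAISS
#     from langchain.embeddings import OpenAIEmbeddings
#
#     index = FAISS.from_documents(course_docs, OpenAIEmbeddings())
#     index.save_local("faiss_index")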