import os
import time

import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings

# Ensure the OpenAI API key is available (expected to be set in the environment)
if not os.environ.get("OPENAI_API_KEY"):
    raise EnvironmentError("The OPENAI_API_KEY environment variable is not set.")

# Load the FAISS index built from the course catalogue
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# Create a retriever from the loaded vector store
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})

# Define a prompt template for course recommendations
prompt_template = """
You are an AI-powered course recommendation expert with extensive knowledge of educational programs across various disciplines. Your primary goal is to provide personalized, high-quality course suggestions tailored to each user's unique interests, goals, and background.

Do not retrieve course recommendations if the user hasn't specifically asked for them or is simply greeting the chatbot. In such general cases, focus on engaging the user by asking about their learning interests or what they are looking to explore.

Conversation History:
{chat_history}

Current User Query:
{question}

Relevant Courses from Database:
{context}

Instructions for Crafting Your Response:

1. Engagement and Tone:
   - Begin with a warm, friendly greeting if this is a new interaction.
   - Maintain a professional yet approachable tone throughout the conversation.
   - If the user initiates casual chat, engage briefly before steering the conversation toward educational interests.

2. Analysis and Recommendation:
   - Carefully analyze the user's query and conversation history to understand their educational needs, interests, and any constraints.
   - Select the most relevant courses from the provided context, prioritizing those whose learning outcomes and syllabus content closely match the user's requirements.

3. Detailed Course Recommendations. For each recommended course, provide:
   - Course title and offering institution
   - A concise overview of the course content
   - Specific skills and knowledge to be gained (from "What You Will Learn")
   - Key topics covered in the syllabus
   - Course level, duration, and language of instruction
   - Course ratings and reviews, if available
   - Direct URL to the course page

4. Personalized Explanation:
   - Clearly articulate how each recommended course aligns with the user's expressed interests and goals.
   - Highlight specific aspects of the course that address the user's needs or previous queries.

Remember to prioritize accuracy, relevance, and user-centricity in your recommendations. Your goal is to empower the user to make informed decisions about their educational path.
Recommendation:
"""

PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)

# Initialize the language model
llm = ChatOpenAI(temperature=0.7, model_name="gpt-4o")

# Set up conversation memory with summarization so long chats stay within the token budget
memory = ConversationSummaryBufferMemory(
    llm=llm,
    max_token_limit=1000,
    memory_key="chat_history",
    return_messages=True
)

# Create the conversational retrieval chain
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": PROMPT}
)

# Streamlit app
st.set_page_config(page_title="HONEY BEE: Course Recommendation Chatbot", page_icon=":book:")
st.title("HONEY BEE: Course Recommendation Chatbot 🐝")

# Initialize chat history in session state and add the introductory message once
if "messages" not in st.session_state:
    st.session_state.messages = []
    welcome_message = (
        "Hello! I'm HONEY BEE, your friendly Course Recommendation Chatbot! 🐝 "
        "I'm here to help you find the best courses based on your interests and goals. "
        "Feel free to ask me anything about learning or courses!"
    )
    st.session_state.messages.append({"role": "assistant", "content": welcome_message})

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What are you looking to learn?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Assistant response generation with a streaming (typing) effect
    with st.chat_message("assistant"):
        # The memory object supplies {chat_history}; only the question is passed in
        response = qa_chain({"question": prompt})
        response_text = response["answer"]

        # Create an empty placeholder and accumulate the response into it
        placeholder = st.empty()
        accumulated_response = ""

        # Stream the response character by character
        for char in response_text:
            accumulated_response += char
            placeholder.markdown(accumulated_response, unsafe_allow_html=True)
            time.sleep(0.01)  # Small delay to create a typing effect

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response_text})
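
# ---------------------------------------------------------------------------
# Note: the app above assumes a FAISS index already exists in the
# "faiss_index" directory. A minimal, hypothetical sketch of how such an
# index could be built offline is shown below (run once as a separate
# script). The CSV file name, loader choice, and chunking parameters are
# illustrative assumptions, not part of the original app.
#
#   from langchain.document_loaders import CSVLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#   from langchain.embeddings import OpenAIEmbeddings
#   from langchain.vectorstores import FAISS
#
#   # Load the course catalogue (hypothetical file) and split it into chunks
#   docs = CSVLoader(file_path="courses.csv").load()
#   splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
#   chunks = splitter.split_documents(docs)
#
#   # Embed the chunks and persist the index to the directory the app loads from
#   FAISS.from_documents(chunks, OpenAIEmbeddings()).save_local("faiss_index")
# ---------------------------------------------------------------------------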