"""Streamlit chatbot that recommends courses via a FAISS-backed RAG chain.

Requires the OPENAI_API_KEY environment variable and a local "faiss_index"
directory previously saved with FAISS.save_local().
"""

import os
import time

import streamlit as st
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationSummaryBufferMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS

st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")

# Fail fast with a readable error if the key is absent.  The original
# `os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY")` raised a
# confusing TypeError (environ values must be str) whenever the key was unset,
# and was a no-op when it was set.
if not os.environ.get("OPENAI_API_KEY"):
    st.error("The OPENAI_API_KEY environment variable is not set.")
    st.stop()

# Prompt steering the model: casual chat by default, course recommendations
# on explicit request, grounded in the retrieved {context}.
prompt_template = """
You are an AI course recommendation system. Your task is to engage in friendly and casual conversation with the user, responding in a warm and approachable manner. If the user initiates a general greeting or casual chat, maintain a conversational tone and avoid mentioning courses unless they explicitly inquire about them. In such cases, gently inquire about their interests in learning or study topics, introducing yourself as an expert in course recommendations.

Summarized Chat History:
{chat_history}

User's Current Query: {question}

If the user specifically asks about courses or learning opportunities, transition to recommending courses based on their interests and goals. Emphasize matching the learning outcomes and syllabus content for relevant recommendations. Consider the chat history for context.

Relevant Courses: {context}

When responding to course inquiries, include:
1. A detailed explanation of how the courses align with the user's interests, focusing on "What You Will Learn."
2. A summary of each course, highlighting:
   - Skills and knowledge gained
   - Key syllabus topics
   - Course level and language
   - Institution offering the course
3. Course ratings, if available.
4. Additional advice based on their learning journey.
5. Course URLs for easy access.

Be encouraging and supportive, relating your suggestions to user preferences or constraints mentioned in previous messages.

Recommendation:
"""

PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"],
)


@st.cache_resource
def _load_retriever():
    """Load the FAISS index once per process (not on every Streamlit rerun)."""
    embeddings = OpenAIEmbeddings()
    # NOTE(security): allow_dangerous_deserialization uses pickle under the
    # hood — only load index files you created and trust.
    vectorstore = FAISS.load_local(
        "faiss_index", embeddings, allow_dangerous_deserialization=True
    )
    # Retrieve the 5 nearest course documents per query.
    return vectorstore.as_retriever(search_kwargs={"k": 5})


def _build_chain():
    """Build the conversational RAG chain with summarizing memory."""
    llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")
    memory = ConversationSummaryBufferMemory(
        llm=llm,
        max_token_limit=1000,
        memory_key="chat_history",
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=_load_retriever(),
        memory=memory,
        combine_docs_chain_kwargs={"prompt": PROMPT},
    )


# Streamlit reruns this script on every interaction.  The original code
# rebuilt the chain — and therefore an EMPTY ConversationSummaryBufferMemory —
# on each turn, so the bot never actually remembered the conversation.
# Keeping the chain in session_state gives each user session persistent memory.
if "qa_chain" not in st.session_state:
    st.session_state.qa_chain = _build_chain()

# Per-session display history (what the UI shows; the chain keeps its own
# summarized memory internally).
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior messages on every rerun so the transcript stays visible.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What are you looking to learn?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        response = st.session_state.qa_chain({"question": prompt})
        response_text = response["answer"]

        # Simulate streaming by revealing the answer one character at a time.
        placeholder = st.empty()
        accumulated_response = ""
        for char in response_text:
            accumulated_response += char
            # HTML rendering deliberately left disabled: the text comes from
            # the LLM and must not be treated as trusted markup.
            placeholder.markdown(accumulated_response)
            time.sleep(0.01)  # small delay for the typing effect

    st.session_state.messages.append({"role": "assistant", "content": response_text})