"""Streamlit chatbot that recommends courses via RAG over a FAISS index.

Pipeline: user query -> ConversationalRetrievalChain (FAISS top-5 retrieval +
summarized chat memory) -> GPT-4-turbo answer, rendered with a simulated
word-by-word streaming effect.
"""

import os
import random
import time

import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings

# Fail fast with a clear message if the API key is missing. (The original
# `os.environ[...] = os.environ.get(...)` self-assignment raised a confusing
# TypeError when the variable was unset, because os.environ values must be
# strings, not None.)
if not os.environ.get("OPENAI_API_KEY"):
    st.error("The OPENAI_API_KEY environment variable is not set.")
    st.stop()

# Load the pre-built FAISS index from disk.
# NOTE(security): allow_dangerous_deserialization unpickles the index file —
# only load indexes you built yourself.
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(
    "faiss_index", embeddings, allow_dangerous_deserialization=True
)

# Retriever returning the 5 most similar course documents per query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})

# Prompt for the combine-docs (stuff) step of the chain. It MUST contain a
# {context} placeholder: that is where the chain injects the retrieved course
# documents. The original template omitted it, so the declared "context"
# input variable failed template validation and retrieved documents could
# never reach the model.
prompt_template = """
You are an AI course recommendation system. Your task is to assist users by engaging them in conversation and providing course recommendations only when they explicitly ask for it. If the user does not indicate interest in receiving course recommendations, focus on engaging them in conversation and understanding their interests. Consider the summarized chat history to provide more relevant and personalized recommendations when requested.

Summarized Chat History:
{chat_history}

Retrieved Course Information:
{context}

User's Current Query: {question}

If the user has asked for course recommendations, provide a list of relevant courses based on their interests and goals, emphasizing how the courses match the learning outcomes and syllabus content. Your response should include:
1. A detailed explanation of how the recommended courses align with the user's interests and previous queries, focusing on the "What You Will Learn" section and the syllabus content.
2. A summary of each recommended course, highlighting:
   - The specific skills and knowledge the user will gain (from "What You Will Learn")
   - Key topics covered in the syllabus
   - Course level and language
   - The institution offering the course
3. Mention the course ratings if available.
4. Any additional advice or suggestions for the user's learning journey, based on the syllabus progression and their conversation history.
5. Provide the course URLs for easy access.

If the user has not explicitly requested course recommendations, respond in a conversational manner, encouraging them to share more about their interests or asking follow-up questions to better understand their needs.

Recommendation:
"""

PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"],
)

# Language model used both for answering and for summarizing chat memory.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4-turbo")

# Summarizing buffer memory: keeps recent turns verbatim and condenses older
# ones so the prompt stays under ~1000 tokens of history.
memory = ConversationSummaryBufferMemory(
    llm=llm,
    max_token_limit=1000,
    memory_key="chat_history",
    return_messages=True,
)

# Conversational RAG chain. Retrieval is performed INTERNALLY by the chain;
# callers only supply {"question": ...}.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": PROMPT},
)

# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.set_page_config(page_title="AI Course Recommendation Chatbot", page_icon=":book:")
st.title("AI Course Recommendation Chatbot")

# Custom CSS hook (currently empty).
st.markdown("""
""", unsafe_allow_html=True)

# Persist chat history across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior conversation on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What are you looking to learn?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # The original code called retriever.retrieve(prompt) — a method that
        # does not exist on LangChain retrievers (the API is
        # get_relevant_documents) — and passed the result as an unused
        # "context" key. The chain retrieves on its own, so we only pass the
        # question.
        response = qa_chain({"question": prompt})
        response_text = response["answer"]

        # Simulated streaming: re-render the growing text into a single
        # st.empty() placeholder. Without the placeholder, each st.markdown
        # call would stack a new widget per word instead of updating in place.
        placeholder = st.empty()
        full_response = ""
        for word in response_text.split():
            full_response += word + " "
            placeholder.markdown(full_response.strip(), unsafe_allow_html=True)
            time.sleep(0.05)  # delay for the typing effect

    st.session_state.messages.append(
        {"role": "assistant", "content": response_text}
    )

# Optional: reset the on-screen conversation.
if st.button("Clear Chat History"):
    st.session_state.messages.clear()
    # st.experimental_rerun() was removed in recent Streamlit releases;
    # prefer st.rerun() when available, fall back otherwise.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()