# Read the data
import pandas as pd

df = pd.read_csv('./Automobile_data.csv')
# df = df.drop(columns=['normalized-losses', 'symboling'], axis=1)

# Flatten each row into a single "column: value" string for embedding
context_data = []
for i in range(len(df)):                 # Loop over rows
    context = ""
    for j in range(len(df.columns)):     # Loop over all columns
        context += df.columns[j]         # Add column name
        context += ": "
        context += str(df.iloc[i, j])    # Convert value to string
        context += " "
    context_data.append(context)

import os

# Get the secret key from the environment
groq_key = os.environ.get('groq_API_Keys')

## LLM used for RAG
from langchain_groq import ChatGroq

llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)

## Embedding model
from langchain_huggingface import HuggingFaceEmbeddings

embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

# Create the vector store
from langchain_chroma import Chroma

vectorstore = Chroma(
    collection_name="car_dataset_store",
    embedding_function=embed_model,
    persist_directory="./",
)

# Add data to the vector store
vectorstore.add_texts(context_data)

retriever = vectorstore.as_retriever()

from langchain_core.prompts import PromptTemplate

template = """You are a car expert.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.

Context: {context}

Question: {question}

Answer:"""

rag_prompt = PromptTemplate.from_template(template)

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)

import gradio as gr

# Function for chatbot responses: accumulate and yield the streamed answer
# so Gradio renders it incrementally instead of stopping at the first chunk
def rag_memory_stream(message, history):
    partial = ""
    for chunk in rag_chain.stream(message):  # Streaming response
        partial += chunk
        yield partial

# Examples and app information
examples = ['I need a car', 'What is the make and fuel type of a car?']
description = "An advanced chatbot that helps you choose the right car based on your preferences and budget."
title = "Car Expert :) Let Me Help You Find the Perfect Ride!"

# Custom theme with sky-blue background and black text
custom_theme = gr.themes.Base(primary_hue="blue", secondary_hue="green").set(
    body_background_fill="#87CEEB",  # Sky-blue background
    body_text_color="#000000",       # Black text
)

# Interface with Car Preferences and Chat tabs
with gr.Blocks(theme=custom_theme) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)

    with gr.Tabs():
        # Chat tab
        with gr.Tab("Chat"):
            chat_interface = gr.ChatInterface(
                fn=rag_memory_stream,
                type="messages",  # Use the OpenAI-style message format
                examples=examples,
            )

        # Additional tabs (optional)
        with gr.Tab("Car Preferences"):
            gr.Markdown("This tab handles car preferences (not modified here).")

# Launch the app
if __name__ == "__main__":
    demo.launch()
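
# --- Optional sanity check (a hypothetical helper, assuming the setup above) ---
# A minimal sketch for exercising the chain from a REPL before launching the
# UI; it uses LangChain's standard runnable streaming API, and the question
# string below is purely illustrative.
def sanity_check_chain(question="What fuel types appear in the dataset?"):
    for chunk in rag_chain.stream(question):  # StrOutputParser yields text chunks
        print(chunk, end="", flush=True)
    print()

# Because `persist_directory` was set, the same collection can be reopened in
# a later session without re-embedding (a sketch; names must match the ones
# used when the store was created):
# reloaded_store = Chroma(
#     collection_name="car_dataset_store",
#     embedding_function=embed_model,
#     persist_directory="./",
# )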