# app.py
import os
import warnings
from dotenv import load_dotenv
import gradio as gr
from qdrant_search import QdrantSearch
from langchain_groq import ChatGroq
from nomic_embeddings import EmbeddingsModel
# Load environment variables from .env file
load_dotenv()
# Suppress FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning)
# Disable tokenizers parallelism to avoid potential issues
os.environ["TOKENIZERS_PARALLELISM"] = "FALSE"
# Initialize global variables
collection_names = ["docs_v1_2", "docs_v2_2", "docs_v3_2"]
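# Maximum number of documents to retrieve (passed to query_multiple_collections below)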
limit = 5
# Initialize the language model
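# (ChatGroq authenticates via the GROQ_API_KEY environment variable)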
llm = ChatGroq(model="mixtral-8x7b-32768")
# Initialize the embeddings model
embeddings = EmbeddingsModel()
# Initialize Qdrant search with necessary credentials
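# (QDRANT_CLOUD_URL and QDRANT_API_KEY are expected in the environment, e.g. via the .env file)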
search = QdrantSearch(
    qdrant_url=os.environ["QDRANT_CLOUD_URL"],
    api_key=os.environ["QDRANT_API_KEY"],
    embeddings=embeddings,
)
def chat_endpoint(question: str):
"""
Handles the chat functionality by processing the user's question,
retrieving relevant documents, generating an answer, and returning sources.
Args:
question (str): The user's question.
Returns:
Tuple[str, str]: The generated answer and the sources used.
"""
query = question.strip()
if not query:
return "❌ **Error:** Query cannot be empty.", "No sources available."
# Step 1: Retrieve relevant documents from Qdrant
retrieved_docs = search.query_multiple_collections(query, collection_names, limit)
if not retrieved_docs:
return "⚠️ **No relevant documents found** for your query.", "No sources available."
# Step 2: Prepare the context from retrieved documents
context = "\n\n".join([doc['text'] for doc in retrieved_docs])
# Step 3: Construct the prompt with context and question
prompt = (
"You are LangAssist, a knowledgeable assistant for the LangChain Python Library. "
"Given the following context from the documentation, provide a helpful answer to the user's question.\n\n"
"### Context:\n{context}\n\n"
"### Question:\n{question}\n\n"
"### Answer:"
).format(context=context, question=query)
# Step 4: Generate an answer using the language model
try:
answer = llm.invoke(prompt)
except Exception as e:
return f"⚠️ **Error generating answer:** {str(e)}", "No sources available."
# Prepare sources
sources_md = "\n\n".join([
f"**Source:** {src['source']}\n**Excerpt:** {src['text']}"
for src in retrieved_docs
])
return answer.content.strip(), sources_md
# Create Gradio Interface
interface = gr.Interface(
    fn=chat_endpoint,
    inputs=gr.Textbox(
        lines=2,
        placeholder="Type your question here...",
        label="Your Question",
    ),
    outputs=[
        gr.Markdown(label="Answer"),
        gr.Markdown(label="Sources"),
    ],
    title="🗨️ LangAssist Chat",
    description="Ask questions about the LangChain Python Library and get answers based on the latest documentation.",
)
# If running locally, uncomment the following lines:
# if __name__ == "__main__":
# interface.launch()
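# Example (illustrative) of calling the handler directly for a quick smoke test,
# assuming the Qdrant and Groq credentials above are configured:
# answer, sources = chat_endpoint("How do I create a custom tool in LangChain?")
# print(answer)
# print(sources)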