# app.py
import os
import warnings

import gradio as gr
from dotenv import load_dotenv
from langchain_groq import ChatGroq

from nomic_embeddings import EmbeddingsModel
from qdrant_search import QdrantSearch

# Load environment variables (QDRANT_CLOUD_URL, QDRANT_API_KEY, GROQ_API_KEY) from .env
load_dotenv()

# Silence FutureWarnings from upstream libraries
warnings.filterwarnings("ignore", category=FutureWarning)

# Disable tokenizer parallelism to avoid fork-related warnings from huggingface tokenizers
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Initialize global resources
collection_names = ["docs_v1_2", "docs_v2_2", "docs_v3_2"]
limit = 5  # documents to retrieve per collection
llm = ChatGroq(model="mixtral-8x7b-32768")
embeddings = EmbeddingsModel()
search = QdrantSearch(
    qdrant_url=os.environ["QDRANT_CLOUD_URL"],
    api_key=os.environ["QDRANT_API_KEY"],
    embeddings=embeddings,
)
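
# `qdrant_search` and `nomic_embeddings` are local modules not shown in this file.
# A minimal sketch of the interfaces this app relies on (names and signatures are
# assumptions inferred from usage below, not the actual implementations):
#
#   class EmbeddingsModel:
#       """Wraps a Nomic embedding model; turns text into a query vector."""
#
#   class QdrantSearch:
#       def query_multiple_collections(
#           self, query: str, collection_names: list[str], limit: int
#       ) -> list[dict]:
#           """Returns hits as dicts carrying at least 'text' and 'source' keys."""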

# Define the query processing function
def chat_with_langassist(query: str):
    if not query.strip():
        return "Query cannot be empty.", []
    
    # Retrieve relevant documents from Qdrant
    retrieved_docs = search.query_multiple_collections(query, collection_names, limit)
    
    # Prepare the context from retrieved documents
    context = "\n".join([doc['text'] for doc in retrieved_docs])
    
    # Construct the prompt with context and question
    prompt = (
        "You are LangAssist, a knowledgeable assistant for the LangChain Python library. "
        "Given the following context from the documentation, provide a helpful answer to "
        "the user's question.\n\n"
        "Context:\n{context}\n\n"
        "If the question is simple small talk (e.g. 'Hi' or 'Hello'), you may ignore the "
        "context and respond conversationally as LangAssist; otherwise, use the context to "
        "answer the query. If you can't find the answer in the sources, say so clearly "
        "instead of making up an answer.\n\n"
        "Question: {question}\n\n"
        "Answer:"
    ).format(context=context, question=query)

    # Generate an answer using the language model
    try:
        answer = llm.invoke(prompt).content.strip()
    except Exception as e:
        return f"Error: {str(e)}", []
    
    # Prepare sources
    sources = [
        {
            "source": doc['source'],
            "text": doc['text']
        } for doc in retrieved_docs
    ]
    
    return answer, sources
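
# Quick sanity check (assumes valid credentials in .env and network access):
#
#   answer, sources = chat_with_langassist("How do I create an LLMChain?")
#   print(answer)
#   for s in sources:
#       print("-", s["source"])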

# Define Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("<h1>LangAssist Chat</h1>")
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    sources_display = gr.Markdown(label="Sources")

    def respond(message, chat_history):
        answer, sources = chat_with_langassist(message)
        chat_history.append((message, answer))

        if sources:
            formatted_sources = "\n".join(
                f"- **Source:** {source['source']}\n  **Text:** {source['text']}"
                for source in sources
            )
        else:
            formatted_sources = "No sources available."

        # Return the updated history, clear the textbox, and show the sources
        return chat_history, "", formatted_sources

    msg.submit(respond, [msg, chatbot], [chatbot, msg, sources_display])
    # Clearing must return one value per output component: empty chat, empty sources
    clear.click(lambda: ([], ""), None, [chatbot, sources_display])

# Run the Gradio app
if __name__ == "__main__":
    demo.launch()