import gradio as gr
import openai
import os
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.schema import Document

# Load the Sentence Transformer Embedding Model
model_name = "intfloat/e5-small"
embedding_model = HuggingFaceEmbeddings(model_name=model_name)
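# Note: e5-family models are trained with "query: " / "passage: " prefixes;
# prepending them (e.g. embedding_model.embed_query("query: how do heat pumps work?"))
# can improve retrieval quality.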

# Set up the OpenAI API key (add OPENAI_API_KEY in Hugging Face Secrets)
openai.api_key = os.getenv("OPENAI_API_KEY")

# Load ChromaDB with the RunGalileo dataset (re-indexed below if the store is empty)
persist_directory = "./docs/chroma/"
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_model)

# Check if documents exist (_collection is Chroma's underlying private collection)
if vectordb._collection.count() == 0:
    print("⚠️ No documents found in ChromaDB. Re-indexing dataset...")

    # Sample dataset (Replace with real RunGalileo dataset)
    documents = [
        Document(page_content="HVAC systems help regulate indoor temperature."),
        Document(page_content="Chiller plants are used in large buildings for cooling."),
        Document(page_content="BACnet is a common protocol for building automation."),
        Document(page_content="Heat pumps are essential in modern energy-efficient HVAC designs."),
        Document(page_content="Smart thermostats improve energy efficiency through AI-based control.")
    ]
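    # A real dataset could also attach metadata for filtering, e.g. (hypothetical values):
    #   Document(page_content="...", metadata={"source": "rungalileo", "topic": "hvac"})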

    # Insert documents into ChromaDB
    vectordb.add_documents(documents)

    print("✅ Documents successfully indexed into ChromaDB.")
else:
    print(f"✅ ChromaDB contains {vectordb._collection.count()} documents.")


# Function to Retrieve Top-K Relevant Documents
def retrieve_documents(question, k=5):
    """Retrieve top K relevant documents from ChromaDB"""
    docs = vectordb.similarity_search(question, k=k)
    
    if not docs:
        return ["⚠️ No relevant documents found. Try a different query."]
    
    return [doc.page_content for doc in docs]
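
# Example (hypothetical query):
#   retrieve_documents("How do chiller plants cool large buildings?", k=3)
#   -> up to 3 of the most similar document texts from the vector store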


# Function to generate the AI response

def generate_response(question, context):
    """Generate AI response using OpenAI GPT-4"""
    
    if not context or "No relevant documents found." in context:
        return "No relevant context available. Try a different query."

    full_prompt = f"Context: {context}\n\nQuestion: {question}"

    try:
        client = openai.OpenAI(api_key=openai.api_key)  # OpenAI v1 client; falls back to OPENAI_API_KEY env var
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are an AI assistant that answers user queries based on the given context."},
                {"role": "user", "content": full_prompt}
            ],
            max_tokens=300,
            temperature=0.7
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Error generating response: {str(e)}"


# Full RAG pipeline: retrieve context, then generate an answer
def rag_pipeline(question):
    retrieved_docs = retrieve_documents(question, k=5)
    context = " ".join(retrieved_docs)
    response = generate_response(question, context)
    return response, "\n\n".join(retrieved_docs)
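
# Example (hypothetical question):
#   answer, sources = rag_pipeline("Which protocol is common in building automation?")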

# Gradio UI interface
iface = gr.Interface(
    fn=rag_pipeline,
    inputs=gr.Textbox(label="Enter your question"),
    outputs=[
        gr.Textbox(label="Generated Response"),
        gr.Textbox(label="Retrieved Documents")
    ],
    title="RAG-Based Question Answering System ",
    description="Enter a question and retrieve relevant documents with AI-generated response."
)

# ✅ Launch the Gradio App
if __name__ == "__main__":
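    # Tip: launch(share=True) would also serve a temporary public link
    # (useful for quick demos outside Hugging Face Spaces).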
    iface.launch()