import os

from google import genai
from google.genai import types
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings


def index_text():
    """Load the prebuilt FAISS vector index, embedding queries with NVIDIA's model."""
    # NVIDIAEmbeddings reads NVIDIA_API_KEY from the environment on its own;
    # fail fast with a clear message if it is missing instead of crashing later.
    if not os.getenv("NVIDIA_API_KEY"):
        raise EnvironmentError("NVIDIA_API_KEY is not set")

    nvidia_embeddings = NVIDIAEmbeddings(
        model="nvidia/llama-3.2-nv-embedqa-1b-v2",
        truncate="NONE",
    )
    # FAISS indexes are pickled on disk, so loading requires explicitly opting
    # in to deserialization; only load indexes you created yourself.
    vectorstore = FAISS.load_local(
        "nvidia_faiss_index",
        embeddings=nvidia_embeddings,
        allow_dangerous_deserialization=True,
    )
    return vectorstore
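

# A sketch of how the "nvidia_faiss_index" directory might have been built in
# the first place, assuming the source corpus is a list of plain text strings.
# build_index and its texts parameter are hypothetical, not part of this
# module: FAISS.from_documents embeds each Document with the same NVIDIA model,
# and save_local persists the index that index_text() later loads.
def build_index(texts):
    nvidia_embeddings = NVIDIAEmbeddings(
        model="nvidia/llama-3.2-nv-embedqa-1b-v2",
        truncate="NONE",
    )
    docs = [Document(page_content=text) for text in texts]
    vectorstore = FAISS.from_documents(docs, nvidia_embeddings)
    vectorstore.save_local("nvidia_faiss_index")
    return vectorstore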


def answer_query(query, vectorstore):
    """Answer a query with Gemini, grounded only in documents retrieved from the index."""
    RAG_TEMPLATE = """\
CONTEXT:
{context}

QUERY:
{query}

Use the provided context to answer the user query. Only use the provided context to answer the query.
If you do not know the answer, or it's not contained in the provided context, respond with "I don't know".
"""
    os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
    client = genai.Client()
    # Get relevant documents
    retriever = vectorstore.as_retriever()
    search_results = retriever.invoke(query, k=2)
    
    # Combine context from retrieved documents
    context = " ".join([doc.page_content for doc in search_results])
    
    # Build prompt
    prompt = RAG_TEMPLATE.format(context=context, query=query)

    # Generate the response with Gemini (empty config means default generation settings)
    response = client.models.generate_content(
        model="gemini-2.5-pro",
        contents=prompt,
        config=types.GenerateContentConfig(),
    )

    return response.text
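

# Minimal usage sketch, assuming NVIDIA_API_KEY and GEMINI_API_KEY are exported
# and a "nvidia_faiss_index" directory already exists on disk; the example
# query string is a placeholder, not from the original module.
if __name__ == "__main__":
    vectorstore = index_text()
    print(answer_query("What topics does the indexed corpus cover?", vectorstore))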