# NOTE(review): the original paste carried a scraped page header ("Spaces:
# Sleeping") and " | |" table artifacts on every line; both removed.
# Stdlib imports first, then third-party, per PEP 8. The duplicate
# `import os` (it appeared twice) has been collapsed into one.
import os

from google import genai
from google.genai import types
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
def index_text():
    """Load the persisted FAISS index built with NVIDIA embeddings.

    Returns:
        FAISS: the deserialized vector store, ready for similarity search.

    Note:
        ``allow_dangerous_deserialization=True`` is required because FAISS
        persistence uses pickle under the hood — only load index folders
        you created yourself, never untrusted ones.
    """
    # NVIDIAEmbeddings reads NVIDIA_API_KEY from the environment on its own.
    # The original `os.environ["NVIDIA_API_KEY"] = os.getenv("NVIDIA_API_KEY")`
    # was a no-op when the variable was set and raised TypeError (env values
    # must be str, not None) when it was not — so it was removed.
    nvidia_embeddings = NVIDIAEmbeddings(
        model="nvidia/llama-3.2-nv-embedqa-1b-v2",
        truncate="NONE",
    )
    # Load the index that was previously saved to the "nvidia_faiss_index"
    # directory; the same embedding model must be used for load and build.
    vectorstore = FAISS.load_local(
        "nvidia_faiss_index",
        embeddings=nvidia_embeddings,
        allow_dangerous_deserialization=True,
    )
    return vectorstore
def answer_query(query, vectorstore):
    """Answer a user query with Gemini, grounded in retrieved context (RAG).

    Args:
        query: Natural-language question from the user.
        vectorstore: A LangChain vector store (e.g. the FAISS index returned
            by ``index_text``) used for similarity retrieval.

    Returns:
        str: Gemini's answer, or "I don't know" when the retrieved context
        does not contain the answer (per the prompt instructions).
    """
    RAG_TEMPLATE = """
#CONTEXT:
{context}
QUERY:
{query}
Use the provided context to answer the user query. Only use the provided context to answer the query.
If you do not know the answer, or it's not contained in the provided context, respond with "I don't know".
"""
    # genai.Client() picks up GEMINI_API_KEY from the environment itself; the
    # original self-assignment of os.environ was a no-op (and a TypeError
    # whenever the variable was unset), so it has been removed.
    client = genai.Client()
    # Bug fix: retrieval depth is configured on the retriever via
    # search_kwargs. Passing `k=2` to retriever.invoke() does not limit the
    # number of returned documents.
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
    search_results = retriever.invoke(query)
    # Flatten the retrieved chunks into a single context string.
    context = " ".join(doc.page_content for doc in search_results)
    prompt = RAG_TEMPLATE.format(context=context, query=query)
    # Generate the grounded answer with Gemini.
    response = client.models.generate_content(
        model="gemini-2.5-pro",
        contents=prompt,
        config=types.GenerateContentConfig(),
    )
    return response.text