|
from langchain_community.vectorstores import Chroma |
|
from langchain_huggingface import HuggingFaceEmbeddings |
|
from langchain_community.chat_models import ChatOpenAI |
|
from langchain_core.output_parsers import StrOutputParser |
|
from langchain import hub |
|
import gradio as gr |
|
import os |
|
|
|
|
|
# --- Model & vector-store configuration (module level, runs at import) ---

# OpenAI API key from the environment; defaults to the empty string when unset.
# NOTE(review): an empty key fails only later, inside the first LLM call, with
# an opaque error — consider failing fast here. TODO confirm desired behavior.
openai_api_key = os.environ.get("OPENAI_API_KEY", "")

# Chat model used to generate the final answer; temperature=0 for
# deterministic output.
# NOTE(review): `ChatOpenAI` from `langchain_community.chat_models` is
# deprecated upstream in favor of the `langchain_openai` package — verify
# before upgrading LangChain.
llm = ChatOpenAI(openai_api_key=openai_api_key, model="gpt-3.5-turbo", temperature=0)

# Converts the chat model's message output into a plain string.
parser = StrOutputParser()

# Multilingual sentence-transformer embeddings, pinned to CPU.
embedding_function = HuggingFaceEmbeddings(
    model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
    model_kwargs={"device": "cpu"}
)

# Chroma vector store persisted under ./chroma_db.
# NOTE(review): presumably the store was built with this same embedding model;
# if not, similarity scores are meaningless — verify against the indexing code.
vectordb = Chroma(
    persist_directory="chroma_db",
    embedding_function=embedding_function
)
|
|
|
|
|
def responder_pregunta(query, k=5, score_threshold=7):
    """Answer a question using RAG over the persisted Chroma store.

    Args:
        query: The user's question (free text).
        k: Number of candidate documents to retrieve (default 5, as before).
        score_threshold: Maximum distance a retrieved document may have to be
            included in the context. Chroma returns distances, so lower means
            more similar (default 7, the original hard-coded heuristic).

    Returns:
        The LLM-generated answer as a string, or a fixed Spanish fallback
        message when no retrieved document is close enough.
    """
    docs = vectordb.similarity_search_with_score(query, k=k)

    # Keep only documents whose distance is below the threshold.
    context = [doc.page_content for doc, score in docs if score < score_threshold]

    # Early exit BEFORE pulling the prompt: avoids a needless network
    # round-trip to the LangChain hub (and any LLM call) when nothing
    # relevant was retrieved. (Also fixes the mojibake in the message:
    # "informaci贸n" -> "información".)
    if not context:
        return "No tengo información suficiente para responder a esta pregunta."

    # Pulled per call so hub-side prompt updates are picked up; the
    # "rlm/rag-prompt" template expects the keys "context" and "question".
    prompt = hub.pull("rlm/rag-prompt")
    rag_chain = prompt | llm | parser
    return rag_chain.invoke({"context": "\n".join(context), "question": query})
|
|
|
|
|
def _build_interface():
    """Assemble the Gradio UI for the nutrition Q&A system.

    Kept as a factory so the module can be imported (e.g. for testing
    `responder_pregunta`) without constructing or launching the web app.
    """
    # Mojibake fixed in all user-facing strings ("nutrici贸n" -> "nutrición",
    # "cl铆nico" -> "clínico").
    return gr.Interface(
        fn=responder_pregunta,
        inputs=gr.Textbox(label="Pregunta sobre nutrición"),
        outputs="text",
        title="Sistema de Preguntas sobre Nutrición",
        description=(
            "Pregunta sobre el contenido del manual clínico. "
            "Basado en RAG con LangChain y Hugging Face."
        ),
    )


if __name__ == "__main__":
    # Launch only when run as a script; previously the server started as a
    # side effect of importing this module.
    _build_interface().launch()
|
|