|
import openai |
|
from langchain.vectorstores import Chroma |
|
from langchain.embeddings import OpenAIEmbeddings |
|
from retriever import * |
|
from chain import * |
|
import gradio as gr |
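# This script wires a persisted Chroma index to a retrieval-augmented QA chain
# and serves it through a simple Gradio text interface.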
|
|
def chatbot(query):
    """Answer a user query with the retrieval-augmented QA chain."""
    llm_response = qa_chain.run({"query": query})
    return llm_response
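
# Note: LangChain chains expose .run(); for a single-input chain, a {"query": ...}
# dict (or the bare query string) is accepted, and the answer text is returned.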
|
|
def load_embeddings_database_from_disk(persistence_directory, embeddings_generator):
    """
    Load a Chroma vector database from disk.

    This function loads a Chroma vector database from the specified directory on disk.
    It expects the same persistence_directory and embedding function as used when
    creating the database.

    Args:
        persistence_directory (str): The directory where the database is stored on disk.
        embeddings_generator (obj): The embeddings generator function that was used when creating the database.

    Returns:
        vector_database (obj): The loaded Chroma vector database.
    """
    vector_database = Chroma(persist_directory=persistence_directory,
                             embedding_function=embeddings_generator)
    return vector_database
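
# A hypothetical usage sketch: the database must have been persisted earlier with
# the same embedding function, e.g. via
#   Chroma.from_documents(docs, OpenAIEmbeddings(), persist_directory='db')
# after which the reloaded store can be queried directly:
#   db = load_embeddings_database_from_disk('db', OpenAIEmbeddings())
#   db.similarity_search("What is retrieval augmented generation?", k=2)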
|
|
# Directory where the Chroma database was persisted.
persistence_directory = 'db'

# Assumes the OpenAI API key is provided via the OPENAI_API_KEY environment variable.
embeddings_generator = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])

vector_database = load_embeddings_database_from_disk(persistence_directory, embeddings_generator)

# Number of documents the retriever returns per query.
topk_documents = 2

retriever = initialize_document_retriever(topk_documents, vector_database)
qa_chain = create_question_answering_chain(retriever)
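
# A minimal sketch of what the two helpers are assumed to do (the actual
# implementations live in retriever.py and chain.py), roughly:
#   retriever = vector_database.as_retriever(search_kwargs={"k": topk_documents})
#   qa_chain = RetrievalQA.from_chain_type(llm=ChatOpenAI(), retriever=retriever)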
|
|
# Gradio 3+ exposes components at the top level; gr.inputs/gr.outputs are deprecated.
inputs = gr.Textbox(lines=7, label="Conversational Interface with Chat History")
outputs = gr.Textbox(label="Reply")

gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs,
             title="Retrieval Augmented Question Answering").launch(debug=True)