import os

import gradio as gr
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings

from retriever import initialize_document_retriever
from chain import create_question_answering_chain


def chatbot(query):
    """Answer a user query using the retrieval-augmented QA chain."""
    llm_response = qa_chain.run({"query": query})
    return llm_response


def load_embeddings_database_from_disk(persistence_directory, embeddings_generator):
    """
    Load a Chroma vector database from disk.

    This function loads a Chroma vector database from the specified directory
    on disk. It expects the same persistence_directory and embedding function
    as were used when creating the database.

    Args:
        persistence_directory (str): The directory where the database is stored on disk.
        embeddings_generator (obj): The embeddings generator that was used when
            creating the database.

    Returns:
        vector_database (obj): The loaded Chroma vector database.
    """
    # Load the Chroma vector database from the persistence directory.
    # The embedding_function must match the one used when the database was created.
    vector_database = Chroma(persist_directory=persistence_directory,
                             embedding_function=embeddings_generator)
    return vector_database


# Directory where the database is persisted on disk.
persistence_directory = 'db'

# Create the embeddings generator for the documents.
# The API key is read from the OPENAI_API_KEY environment variable.
embeddings_generator = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])

# Load the Chroma vector database from disk.
vector_database = load_embeddings_database_from_disk(persistence_directory, embeddings_generator)

# Number of top-ranked documents to retrieve per query.
topk_documents = 2

# Create the retriever on top of the vector database.
retriever = initialize_document_retriever(topk_documents, vector_database)

# Build the question-answering chain on top of the retriever.
qa_chain = create_question_answering_chain(retriever)

# Set up and launch the Gradio interface.
inputs = gr.Textbox(lines=7, label="Conversational Interface with Chat History")
outputs = gr.Textbox(label="Reply")

gr.Interface(fn=chatbot,
             inputs=inputs,
             outputs=outputs,
             title="Retrieval Augmented Question Answering").launch(debug=True)
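
# ---------------------------------------------------------------------------
# For reference: minimal sketches of the two helpers imported above from
# retriever.py and chain.py. These are illustrative assumptions about what
# those project-specific modules might contain, not their actual contents;
# adjust to your own implementations. They are kept commented out so this
# script remains runnable as-is.
#
#   # retriever.py
#   def initialize_document_retriever(topk_documents, vector_database):
#       # Expose the vector store as a retriever returning the top-k matches.
#       return vector_database.as_retriever(search_kwargs={"k": topk_documents})
#
#   # chain.py
#   from langchain.chains import RetrievalQA
#   from langchain.chat_models import ChatOpenAI
#
#   def create_question_answering_chain(retriever):
#       # A RetrievalQA chain "stuffs" the retrieved documents into the prompt.
#       # Its input key is "query", matching qa_chain.run({"query": ...}) above.
#       llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
#       return RetrievalQA.from_chain_type(llm=llm, chain_type="stuff",
#                                          retriever=retriever)
# ---------------------------------------------------------------------------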