# NOTE: removed Hugging Face Spaces page artifacts ("Spaces:" / "Runtime error")
# that were accidentally pasted into this file — they are not Python code.
import os

import gradio as gr
import openai
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryMemory
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
# os.environ["OPENAI_API_KEY"] = "sk-..."  # Replace with your key (note: "environ", not "envrion")
# Use the following lines to load a directory of PDFs
# Load every PDF under data/ and split each document's pages into chunks
# small enough for retrieval.  (Original lines carried trailing " | |"
# copy-paste artifacts that made the file invalid Python; removed.)
loader = PyPDFDirectoryLoader("data/")
data = loader.load_and_split()

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,   # max characters per chunk
    chunk_overlap=0,  # no overlap between adjacent chunks
)
all_splits = text_splitter.split_documents(data)

# Embed the chunks with OpenAI embeddings and index them in an in-memory
# Chroma vector store for similarity search.
vectorstore = Chroma.from_documents(
    documents=all_splits,
    embedding=OpenAIEmbeddings(),
)
# Chat model used both for answering questions and for summarising the
# conversation history held in memory.  (Trailing " | |" paste artifacts
# removed — they made these lines invalid Python.)
llm = ChatOpenAI(temperature=1.0, model="gpt-4-1106-preview")

# Rolling summary of the conversation, exposed to the chain under the
# "chat_history" key that ConversationalRetrievalChain expects.
memory = ConversationSummaryMemory(
    llm=llm,
    memory_key="chat_history",
    return_messages=True,
)

retriever = vectorstore.as_retriever()

# RAG chain: condense the question with chat history, retrieve relevant
# chunks from the vector store, then answer with the chat model.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    memory=memory,
)
def predict(message, history):
    """Gradio chat callback: answer ``message`` via the retrieval chain.

    Args:
        message: The user's latest chat message.
        history: Gradio's message history.  Deliberately unused — the chain
            maintains its own ConversationSummaryMemory, which supplies
            "chat_history" internally on every call.

    Returns:
        The chain's answer string.
    """
    # NOTE(review): Chain.run is deprecated in newer LangChain releases in
    # favour of .invoke(); kept here to match the pinned API this file uses.
    return qa_chain.run(question=message)
# Build the chat UI around predict().  Guard the launch so importing this
# module (e.g. for testing) does not start the web server; share=True
# creates a temporary public URL.  (Trailing " | |" artifacts removed.)
demo = gr.ChatInterface(predict)

if __name__ == "__main__":
    demo.launch(share=True)