import gradio as gr
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import UnstructuredFileLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores.base import VectorStoreRetriever
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
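# Note: the langchain.* paths above are the legacy (pre-0.2) import locations;
# newer LangChain releases move most of these classes into langchain_community,
# langchain_core, or langchain_text_splitters instead.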
# Prompt template
template = """Instruction:
You are an AI assistant for answering questions about the provided context.
You are given the following extracted parts of a long document and a question. Provide a detailed answer.
If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
=======
{context}
=======
Chat History:
{question}
Output:"""
QA_PROMPT = PromptTemplate(
    template=template,
    input_variables=["question", "context"]
)
# Returns a FAISS vector store given a txt file
def prepare_vector_store(filename):
    # Load data
    loader = UnstructuredFileLoader(filename)
    raw_documents = loader.load()

    # Split the text; chunk_size and chunk_overlap are measured in characters,
    # since length_function=len
    text_splitter = CharacterTextSplitter(
        separator="\n\n",
        chunk_size=400,
        chunk_overlap=100,
        length_function=len
    )
    documents = text_splitter.split_documents(raw_documents)

    # Create the vector store from the chunks and their embeddings
    embeddings = HuggingFaceEmbeddings()
    vectorstore = FAISS.from_documents(documents, embeddings)

    return vectorstore
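# Example usage (hypothetical file name):
#   vs = prepare_vector_store("my_notes.txt")
#   top_chunks = vs.similarity_search("some query", k=4)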
# Load the Phi-2 model from the Hugging Face Hub
model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)

# The model is already placed on CPU above, so no device_map is passed here
phi2 = pipeline("text-generation", tokenizer=tokenizer, model=model, max_new_tokens=64)
# Phi-2 has no pad token by default; reuse the EOS token so generation does not warn
phi2.tokenizer.pad_token_id = phi2.model.config.eos_token_id

hf_model = HuggingFacePipeline(pipeline=phi2)
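# Quick sanity check for the wrapped pipeline (example prompt, uncomment to try):
#   print(hf_model.invoke("Instruction: Say hello.\nOutput:"))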
# Retrieval QA chain
def get_retrieval_qa_chain(filename):
    llm = hf_model
    # Default retriever settings: plain similarity search over the FAISS index
    retriever = VectorStoreRetriever(
        vectorstore=prepare_vector_store(filename)
    )
    model = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        chain_type_kwargs={"prompt": QA_PROMPT},
    )
    return model
# Question Answering Chain
qa_chain = get_retrieval_qa_chain(filename="Oppenheimer-movie-wiki.txt")
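# RetrievalQA.from_chain_type defaults to the "stuff" chain type, which
# concatenates the retrieved chunks into the {context} slot of QA_PROMPT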
# Generates a response using the question answering chain defined earlier
def generate(question, chat_history):
    # Fold the chat history and the new question into a single query string
    query = ""
    for req, res in chat_history:
        query += f"User: {req}\n"
        query += f"Assistant: {res}\n"
    query += f"User: {question}"

    result = qa_chain.invoke({"query": query})
    response = result["result"].strip()

    # Keep only the first paragraph and strip any continuation turns the model
    # hallucinates after its answer
    response = response.split("\n\n")[0].strip()
    if "User:" in response:
        response = response.split("User:")[0].strip()
    if "INPUT:" in response:
        response = response.split("INPUT:")[0].strip()
    if "Assistant:" in response:
        response = response.split("Assistant:")[1].strip()

    chat_history.append((question, response))
    return "", chat_history
# Replaces the retriever in the question answering chain whenever a new file is uploaded
def upload_file(qa_chain):
    def uploader(file):
        qa_chain.retriever = VectorStoreRetriever(
            vectorstore=prepare_vector_store(file)
        )
        return file
    return uploader
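# Note: this mutates the single module-level qa_chain, so an upload swaps the
# knowledge base for every user connected to this demo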
with gr.Blocks() as demo:
    gr.Markdown("""
# RAG-Phi-2 Chatbot demo
### This demo uses the Phi-2 language model and Retrieval Augmented Generation (RAG) to let you add custom knowledge to the chatbot by uploading a txt file that contains the text data you would like to augment the chatbot with.
### If you don't have one, a txt file is already loaded: the entire Wikipedia page of the new Oppenheimer movie. The movie came out in July 2023, so the Phi-2 model is not aware of it.
The context size of the Phi-2 model is 2048 tokens, so even this medium-sized Wikipedia page (11.5k tokens) does not fit in the context window.
Retrieval Augmented Generation (RAG) enables us to retrieve just the few small chunks of the document that are relevant to our query and inject them into the prompt.
The chatbot is then able to answer questions by incorporating knowledge from the newly provided document. RAG can be used with thousands of documents, but this demo is limited to just one txt file.
""")

    file_output = gr.File(label="txt file")
    upload_button = gr.UploadButton(
        label="Click to upload a txt file",
        file_types=["text"],
        file_count="single"
    )
    upload_button.upload(upload_file(qa_chain), upload_button, file_output)

    chatbot = gr.Chatbot(label="RAG Phi-2 Chatbot")
    msg = gr.Textbox(label="Message", placeholder="Enter text here")
    clear = gr.ClearButton([msg, chatbot])
    msg.submit(fn=generate, inputs=[msg, chatbot], outputs=[msg, chatbot])

    examples = gr.Examples(
        examples=[
            "Who portrayed J. Robert Oppenheimer in the new Oppenheimer movie?",
            "In the plot of the movie, why did Lewis Strauss resent Robert Oppenheimer?"
        ],
        inputs=[msg],
    )

demo.launch()
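# When running locally, demo.launch(share=True) would additionally expose a
# temporary public URL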