# AutomationBot / app.py
# Import necessary libraries
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
import gradio as gr
from google.colab import drive
import chromadb
from langchain.llms import HuggingFacePipeline
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
# Download the model from HuggingFace
model_name = "anakin87/zephyr-7b-alpha-sharded"
# Configure 4-bit NF4 quantization so the 7B model fits in limited GPU memory
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    quantization_config=bnb_config
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.bos_token_id = 1 # Set beginning of sentence token id
# Mount Google Drive and specify folder path
drive.mount('/content/drive')
folder_path = '/content/drive/MyDrive/TestcaseReport/'
# Load the documents from Google Drive
loader = PyPDFDirectoryLoader(folder_path)
documents = loader.load()
# Split the documents into small chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
all_splits = text_splitter.split_documents(documents)
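# Optional sanity check (a sketch, not part of the original flow): confirm how
# many chunks the splitter produced before embedding them.
# print(f"Split {len(documents)} pages into {len(all_splits)} chunks")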
# Specify embedding model
embedding_model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {"device": "cpu"} # Using CPU since GPU is not available
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name, model_kwargs=model_kwargs)
# Embed document chunks
vectordb = Chroma.from_documents(documents=all_splits, embedding=embeddings, persist_directory="chroma_db")
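# Optional (a sketch): persist the index so it can be reloaded later without
# re-embedding; "chroma_db" matches the persist_directory used above.
# vectordb.persist()
# vectordb = Chroma(persist_directory="chroma_db", embedding_function=embeddings)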
# Specify the retriever
retriever = vectordb.as_retriever()
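# Optionally cap the number of chunks returned per query (a sketch; k=4 is illustrative):
# retriever = vectordb.as_retriever(search_kwargs={"k": 4})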
# Build HuggingFace pipeline for using zephyr-7b-alpha
hf_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    use_cache=True,
    device_map="auto",
    max_length=2048,
    do_sample=True,
    top_k=5,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
)
# Specify the llm
llm = HuggingFacePipeline(pipeline=hf_pipeline)
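# Optional smoke test (a sketch; the prompt is illustrative): call the wrapped
# pipeline directly before wiring it into the retrieval chain.
# print(llm("What is a test case?"))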
# Define the create_conversation function
def create_conversation(query: str, chat_history: list) -> tuple:
    try:
        memory = ConversationBufferMemory(
            memory_key='chat_history',
            return_messages=False
        )
        qa_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=retriever,
            memory=memory,
            get_chat_history=lambda h: h,
        )
        result = qa_chain({'question': query, 'chat_history': chat_history})
        chat_history.append((query, result['answer']))
        return '', chat_history
    except Exception as e:
        chat_history.append((query, str(e)))
        return '', chat_history
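# Example standalone usage (a sketch; the query string is illustrative):
#   _, history = create_conversation("Summarize the test case report", [])
#   print(history[-1][1])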
# Define the Gradio UI
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label='My Chatbot')
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def submit_message(text, chat_history):
        # Route the message and the current history through the QA chain;
        # the returned tuple clears the textbox and refreshes the chatbot.
        return create_conversation(text, chat_history)

    msg.submit(submit_message, [msg, chatbot], [msg, chatbot])
# Launch the Gradio demo
demo.launch()