import gradio as gr
import os
import torch
from torch import cuda, bfloat16

import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import StoppingCriteria, StoppingCriteriaList

# The following are not referenced directly below, but importing them ensures the
# Space has them available (accelerate, bitsandbytes and sentence-transformers are
# used indirectly by device_map, 4-bit loading and HuggingFaceEmbeddings).
import accelerate
import einops
import xformers
import bitsandbytes
import sentence_transformers

import langchain
from langchain.llms import HuggingFacePipeline
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain

import huggingface_hub
from huggingface_hub import InferenceClient
# Login to Hugging Face using a token
# huggingface_hub.login(HF_TOKEN)

"""
Loading of the Llama 3 model
"""
HF_TOKEN = os.environ.get("HF_TOKEN", None)
model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'

"""
Set the quantization configuration to load the large model with less GPU memory;
this requires the `bitsandbytes` library.
"""
bnb_config = transformers.BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=bfloat16
)

tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    token=HF_TOKEN,
    quantization_config=bnb_config
)  # .to("cuda:0") is not needed when device_map="auto" places the model
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
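# Note: `terminators` is not wired into the generation pipeline below. A minimal
# sketch of how it could be used, assuming a direct call to `model.generate`, so
# that Llama 3's "<|eot_id|>" is also treated as an end-of-sequence token:
# outputs = model.generate(input_ids, max_new_tokens=512, eos_token_id=terminators)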
| """CPU""" | |
| # model_config = transformers.AutoConfig.from_pretrained( | |
| # model_id, | |
| # token=HF_TOKEN, | |
| # # use_auth_token=hf_auth | |
| # ) | |
| # model = transformers.AutoModelForCausalLM.from_pretrained( | |
| # model_id, | |
| # trust_remote_code=True, | |
| # config=model_config, | |
| # # quantization_config=bnb_config, | |
| # token=HF_TOKEN, | |
| # # use_auth_token=hf_auth | |
| # ) | |
| # model.eval() | |
| # tokenizer = transformers.AutoTokenizer.from_pretrained( | |
| # model_id, | |
| # token=HF_TOKEN, | |
| # # use_auth_token=hf_auth | |
| # ) | |
| # generate_text = transformers.pipeline( | |
| # model=self.model, tokenizer=self.tokenizer, | |
| # return_full_text=True, | |
| # task='text-generation', | |
| # temperature=0.01, | |
| # max_new_tokens=512 | |
| # ) | |
| """ | |
| Setting up the stop list to define stopping criteria. | |
| """ | |
| stop_list = ['\nHuman:', '\n```\n'] | |
| stop_token_ids = [tokenizer(x)['input_ids'] for x in stop_list] | |
| stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids] | |
# Define a custom stopping criteria object.
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False

stopping_criteria = StoppingCriteriaList([StopOnTokens()])
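# Quick sanity check (a sketch; uncomment to verify a stop string triggers the criteria):
# _probe = tokenizer('\nHuman:', return_tensors='pt', add_special_tokens=False)['input_ids'].to(device)
# assert stopping_criteria(_probe, None)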
generate_text = transformers.pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,  # langchain expects the full text
    task='text-generation',
    # Generation parameters are passed here too.
    stopping_criteria=stopping_criteria,  # without this the model rambles during chat
    temperature=0.1,  # 'randomness' of outputs; 0.0 is the min and 1.0 the max (only takes effect when sampling, i.e. with do_sample=True)
    max_new_tokens=512,  # max number of tokens to generate in the output
    repetition_penalty=1.1  # without this the output begins repeating
)

llm = HuggingFacePipeline(pipeline=generate_text)
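# Optional smoke test for the wrapped pipeline (a sketch; uncomment to run once at startup):
# print(llm("Briefly introduce yourself."))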
loader = DirectoryLoader('data2/text/', loader_cls=TextLoader)
documents = loader.load()
print('Number of loaded documents:', len(documents))

text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
all_splits = text_splitter.split_documents(documents)
# Inspect the first chunk produced by the splitter.
print(all_splits[0])
print("#########################################")
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {"device": device}  # reuse the device detected above so this also runs on CPU
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)

# Store the embeddings in the vector store.
vectorstore = FAISS.from_documents(all_splits, embeddings)
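# Optional: persist the FAISS index so it does not have to be rebuilt on every restart
# (a sketch, assuming a writable "faiss_index" directory):
# vectorstore.save_local("faiss_index")
# vectorstore = FAISS.load_local("faiss_index", embeddings)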
chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)
chat_history = []
def format_prompt(query):
    # Construct a clear and structured prompt to guide the LLM's response.
    prompt = f"""
You are a knowledgeable assistant with access to a comprehensive database.
I need you to answer my question and provide related information in a specific format.
Here's what I need:
1. A brief, general response to my question based on related answers retrieved.
2. A JSON-formatted output containing:
   - "question": The original question.
   - "answer": The detailed answer.
   - "related_questions": A list of related questions and their answers, each as a dictionary with the keys:
     - "question": The related question.
     - "answer": The related answer.
Here's my question:
{query}
Include a brief final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
"""
    return prompt
def qa_infer(query):
    formatted_prompt = format_prompt(query)
    result = chain({"question": formatted_prompt, "chat_history": chat_history})
    return result['answer']

# Example query:
# query = "What is the best TS pin configuration for BQ24040 in normal battery charge mode"
# qa_infer(query)
EXAMPLES = [
    "How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
    "Can BQ25896 support I2C interface?",
    "Does TDA2 vout support bt656 8-bit mode?"
]
demo = gr.Interface(
    fn=qa_infer,
    inputs="text",
    outputs="text",
    allow_flagging='never',
    examples=EXAMPLES,
    cache_examples=False
)

# Launch the app!
# demo.launch(enable_queue=True, share=True)
# demo.queue(default_enabled=True).launch(debug=True, share=True)
demo.launch()