kishorefafa committed on
Commit fa588e2 · verified · 1 Parent(s): 05465f0
Files changed (1)
  1. app.py +8 -20
app.py CHANGED
@@ -1,14 +1,8 @@
-# Install dependencies
-pip install -q transformers peft accelerate bitsandbytes safetensors sentencepiece streamlit chromadb langchain sentence-transformers gradio pypdf
-
 # Import necessary libraries
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
-
-import os
 import gradio as gr
 from google.colab import drive
-
 import chromadb
 from langchain.llms import HuggingFacePipeline
 from langchain.document_loaders import PyPDFDirectoryLoader
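
The removed install line was a shell command sitting inside a Python module, so importing app.py raised a SyntaxError before anything ran. One conventional home for it (a sketch, not part of this commit) is a requirements.txt file installed with pip install -r requirements.txt:

    # requirements.txt (hypothetical companion file, mirroring the removed line)
    transformers
    peft
    accelerate
    bitsandbytes
    safetensors
    sentencepiece
    streamlit
    chromadb
    langchain
    sentence-transformers
    gradio
    pypdf
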
@@ -60,7 +54,7 @@ vectordb = Chroma.from_documents(documents=all_splits, embedding=embeddings, per
 retriever = vectordb.as_retriever()
 
 # Build HuggingFace pipeline for using zephyr-7b-alpha
-pipeline = pipeline(
+hf_pipeline = pipeline(
     "text-generation",
     model=model,
     tokenizer=tokenizer,
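
The rename fixes a name-shadowing bug: the old line rebound the imported transformers.pipeline factory to the pipeline object it returned, leaving the factory unreachable for the rest of the module (and breaking any re-run of the cell). A minimal sketch of the distinction, using "gpt2" as a hypothetical stand-in model:

    from transformers import pipeline

    hf_pipeline = pipeline("text-generation", model="gpt2")  # factory name survives
    another = pipeline("text-generation", model="gpt2")      # still works

    # By contrast, `pipeline = pipeline("text-generation", model="gpt2")` rebinds
    # the factory name to the returned TextGenerationPipeline object, so any later
    # `pipeline(...)` call runs generation instead of constructing a pipeline.
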
@@ -75,7 +69,7 @@ pipeline = pipeline(
 )
 
 # Specify the llm
-llm = HuggingFacePipeline(pipeline=pipeline)
+llm = HuggingFacePipeline(pipeline=hf_pipeline)
 
 # Define the create_conversation function
 def create_conversation(query: str, chat_history: list) -> tuple:
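
HuggingFacePipeline wraps an already-built transformers pipeline as a LangChain LLM, so the wrapper simply follows the rename. A self-contained sketch of the same wiring on a small stand-in model ("gpt2" and the prompt are assumptions, not the app's zephyr-7b-alpha setup):

    from transformers import pipeline
    from langchain.llms import HuggingFacePipeline

    # Build a small text-generation pipeline and wrap it for LangChain.
    hf_pipeline = pipeline("text-generation", model="gpt2", max_new_tokens=32)
    llm = HuggingFacePipeline(pipeline=hf_pipeline)

    # The wrapped pipeline is now usable anywhere LangChain expects an LLM.
    print(llm("Retrieval-augmented generation is"))
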
@@ -96,26 +90,20 @@ def create_conversation(query: str, chat_history: list) -> tuple:
         return '', chat_history
 
     except Exception as e:
-        chat_history.append((query, e))
+        chat_history.append((query, str(e)))
         return '', chat_history
 
-def ask_question(query: str):
-    response = create_conversation(query, [])
-    gen_out = response[1][0][1]
-    response_start_token = "Helpful Answer:"
-    idx = gen_out.index(response_start_token)
-    rag_prompt = gen_out[:idx]
-    response_text = gen_out[idx:]
-
-    return rag_prompt, response_text
-
 # Define the Gradio UI
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(label='My Chatbot')
     msg = gr.Textbox()
     clear = gr.ClearButton([msg, chatbot])
 
-    msg.submit(create_conversation, [msg, chatbot], [msg, chatbot])
+    def submit_message(text):
+        _, chat_history = create_conversation(text, [])
+        chatbot.update(chat_history)
+
+    msg.submit(submit_message, [msg], [msg])
 
 # Launch the Gradio demo
 demo.launch()
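
Two Gradio details are in play in this hunk. gr.Chatbot serializes its history, so appending the raw Exception (rather than str(e)) breaks rendering of the error turn; and in Blocks, components are normally refreshed by returning new values from the callback, mapped onto the outputs list, which is the convention the pre-change wiring msg.submit(create_conversation, [msg, chatbot], [msg, chatbot]) relied on. A minimal sketch of that return-value convention (the echo reply is a hypothetical stand-in for the app's RAG chain):

    import gradio as gr

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(label='My Chatbot')
        msg = gr.Textbox()
        clear = gr.ClearButton([msg, chatbot])

        def respond(message, chat_history):
            # Hypothetical reply in place of create_conversation's RAG answer.
            chat_history.append((message, "echo: " + message))
            return '', chat_history  # values map onto [msg, chatbot] below

        msg.submit(respond, [msg, chatbot], [msg, chatbot])

    demo.launch()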