Update app.py
app.py (CHANGED)
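In brief, the commit retimes the progress-bar milestones inside generate_response, normalizes the time.sleep calls, adds a per-word delay (plus a debug print) to the streamed answer, comments out the start-up progress bar around load_db and load_model, and disables the placeholder AI message block by wrapping it in a triple-quoted string.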
@@ -160,11 +160,9 @@ def generate_response(query, model, template):
     progress_text = "Running Inference. Please wait."
     my_bar = st.progress(0, text=progress_text)
     # fill those as appropriate
-
-
-
-
-    my_bar.progress(0.1, "Running RAG. Please wait.")
+    my_bar.progress(0.3, "Loading Model. Please wait.")
+    time.sleep(1)
+    my_bar.progress(0.5, "Running RAG. Please wait.")
     context = fetch_context(db, model, model_name, query, template)
 
     my_bar.progress(0.7, "Generating Answer. Please wait.")
@@ -175,7 +173,7 @@ def generate_response(query, model, template):
     my_bar.progress(0.9, "Post Processing. Please wait.")
 
     my_bar.progress(1.0, "Done")
-    time.
+    time.sleep(1)
     my_bar.empty()
     return response
 
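For context on the calls above: st.progress(value, text=...) draws a bar and returns a handle; calling .progress() on the handle updates both the fraction (0.0 to 1.0) and the label, and .empty() removes the widget. A minimal standalone sketch of the lifecycle the commit leans on (the stage labels here are illustrative):

    import time
    import streamlit as st

    bar = st.progress(0, text="Starting. Please wait.")  # create the bar
    bar.progress(0.5, text="Halfway there.")             # update fraction and label
    bar.progress(1.0, text="Done")
    time.sleep(1)   # brief pause so the final state is visible before removal
    bar.empty()     # remove the bar from the page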
@@ -204,9 +202,10 @@ def set_as_background_img(png_file):
 
 
 def stream_to_screen(response):
+    print(response)
     for word in response.split():
         yield word + " "
-        time.sleep(0.
+        time.sleep(0.15)
 
 
 if __name__=="__main__":
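For context, st.write_stream consumes any generator (or iterable) of string chunks and renders them incrementally, which is what the updated stream_to_screen feeds it. A minimal sketch, with an illustrative input string:

    import time
    import streamlit as st

    def stream_to_screen(response):
        # Yield the answer one word at a time so the UI types it out gradually.
        for word in response.split():
            yield word + " "
            time.sleep(0.15)  # per-word pacing, matching the commit

    st.write_stream(stream_to_screen("This answer appears word by word."))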
@@ -276,15 +275,12 @@ if __name__=="__main__":
     Question: {question}\n> Context:\n>>>\n{context}\n>>>\nRelevant parts"""}
 
     # Loading and caching db and model
-    my_bar = st.progress(0, "Loading Database. Please wait.")
-    my_bar.progress(0.1, "Loading Embedding & Database. Please wait.")
+    #my_bar = st.progress(0, "Loading Database. Please wait.")
+    #my_bar.progress(0.1, "Loading Embedding & Database. Please wait.")
+    #my_bar.progress(0.7, "Loading Model. Please wait.")
+
     db = load_db(device)
-    my_bar.progress(0.7, "Loading Model. Please wait.")
     model, tokenizer = load_model(model_name)
-    my_bar.progress(1.0, "Done")
-    time. sleep(1)
-    my_bar.empty()
-
 
     response = False
     user_question = st.chat_input('What do you want to ask ..')
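The "# Loading and caching db and model" comment suggests load_db and load_model are memoized elsewhere in app.py, which is why the start-up progress bar could be dropped: after the first run the loads return almost instantly. The usual Streamlit pattern for this (a sketch under that assumption; the decorator is not visible in this diff, and the loader body below is a stand-in) is st.cache_resource:

    import streamlit as st

    @st.cache_resource  # runs once per process; the object is reused across reruns and sessions
    def load_db(device):
        # Stand-in body: the real loader (embeddings + vector store) lives elsewhere in app.py.
        return {"device": device, "store": []}

    db = load_db("cpu")  # cheap on every rerun after the first call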
@@ -294,12 +290,12 @@ if __name__=="__main__":
     if user_question is not None and user_question!="":
         with st.chat_message("Human", avatar="🧍🏻"):
             st.write(user_question)
-
+        """
         if response:
             with st.chat_message("AI", avatar="🌐️"):
                 # to empty response container after first pass
                 st.write(" ")
-
+        """
         response = generate_response(user_question, model, all_templates)
         with st.chat_message("AI", avatar="🌐️"):
             st.write_stream(stream_to_screen(response))
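A note on the pair of """ lines added in the last hunk: a bare triple-quoted string is an ordinary expression statement, so everything between the quotes (here the whole "if response:" block) is parsed as a string and never executed. It is a quick way to park code while debugging; the same effect in miniature:

    """
    print("never runs: this block is just a string literal")
    """
    print("this line still runs")

For permanent removal, # comments or an "if False:" guard are the more conventional choice, since a stray docstring-style block can trip up linters.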