Pijush2023 committed on
Commit eae733b · verified · 1 Parent(s): fda3bca

Update app.py

Files changed (1)
  1. app.py +38 -299
app.py CHANGED
@@ -311,260 +311,15 @@ chain_neo4j = (
 
 
 
-
-
-
-# def generate_answer(message, choice, retrieval_mode, selected_model):
-#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
-
-#     try:
-#         # Handle hotel-related queries
-#         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_hotels()
-#             return response, extract_addresses(response)
-
-#         # Handle restaurant-related queries
-#         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_yelp_restaurants()
-#             return response, extract_addresses(response)
-
-#         # Handle flight-related queries
-#         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_flights()
-#             return response, extract_addresses(response)
-
-#         if retrieval_mode == "VDB":
-#             if selected_model == chat_model:
-#                 # Use GPT-4o with its vector store and template
-#                 retriever = gpt_retriever
-#                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = prompt_template.format(context=context, question=message)
-
-#                 qa_chain = RetrievalQA.from_chain_type(
-#                     llm=chat_model,
-#                     chain_type="stuff",
-#                     retriever=retriever,
-#                     chain_type_kwargs={"prompt": prompt_template}
-#                 )
-#                 response = qa_chain({"query": message})
-#                 return response['result'], extract_addresses(response['result'])
-
-#             elif selected_model == phi_pipe:
-#                 # Use Phi-3.5 with its vector store and a simplified prompt
-#                 retriever = phi_retriever
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = f"""
-#                 Here is the information based on the documents provided:
-#                 {context}
-
-#                 {message}
-#                 """
-
-#                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
-
-#                 response = selected_model(prompt, **{
-#                     "max_new_tokens": 300,
-#                     "return_full_text": False,
-#                     "temperature": 0.5,
-#                     "do_sample": False,
-#                 })
-
-#                 if response:
-#                     generated_text = response[0]['generated_text']
-#                     logging.debug(f"Phi-3.5 Response: {generated_text}")
-#                     return generated_text, extract_addresses(generated_text)
-#                 else:
-#                     logging.error("Phi-3.5 did not return any response.")
-#                     return "No response generated.", []
-
-#         elif retrieval_mode == "KGF":
-#             response = chain_neo4j.invoke({"question": message})
-#             return response, extract_addresses(response)
-#         else:
-#             return "Invalid retrieval mode selected.", []
-
-#     except Exception as e:
-#         logging.error(f"Error in generate_answer: {e}")
-#         return "Sorry, I encountered an error while processing your request.", []
-
-
-# Short Prompt Template for Phi-3.5 Proprietary Model
-
-# phi_short_template = f"""
-# As an expert on Birmingham, Alabama, I will provide concise, accurate, and informative responses to your queries based on 128 token limit . Given the sunny weather today, {current_date}, feel free to ask me anything you need to know about the city.
-# Provide only the direct answer to the question without any follow-up questions.
-# {{context}}
-# Question: {{question}}
-# Answer:
+# # Define the custom template for Phi-3.5
+# phi_custom_template = """
+# <|system|>
+# You are a helpful assistant.<|end|>
+# <|user|>
+# {context}
+# {question}<|end|>
+# <|assistant|>
 # """
-
-
-
-# def generate_answer(message, choice, retrieval_mode, selected_model):
-#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
-
-#     try:
-#         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_hotels()
-#             return response, extract_addresses(response)
-
-#         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_yelp_restaurants()
-#             return response, extract_addresses(response)
-
-#         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_flights()
-#             return response, extract_addresses(response)
-
-#         if retrieval_mode == "VDB":
-#             if selected_model == chat_model:
-#                 retriever = gpt_retriever
-#                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = prompt_template.format(context=context, question=message)
-
-#                 qa_chain = RetrievalQA.from_chain_type(
-#                     llm=chat_model,
-#                     chain_type="stuff",
-#                     retriever=retriever,
-#                     chain_type_kwargs={"prompt": prompt_template}
-#                 )
-#                 response = qa_chain({"query": message})
-#                 return response['result'], extract_addresses(response['result'])
-
-#             elif selected_model == phi_pipe:
-#                 retriever = phi_retriever
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = phi_short_template.format(context=context, question=message)
-
-#                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
-
-#                 response = selected_model(prompt, **{
-#                     "max_new_tokens": 160,  # Increased to handle longer responses
-#                     "return_full_text": True,
-#                     "temperature": 0.7,  # Adjusted to avoid cutting off
-#                     "do_sample": True,  # Allow sampling to increase response diversity
-#                 })
-
-#                 if response:
-#                     generated_text = response[0]['generated_text']
-#                     logging.debug(f"Phi-3.5 Response: {generated_text}")
-#                     cleaned_response = clean_response(generated_text)
-#                     return cleaned_response, extract_addresses(cleaned_response)
-#                 else:
-#                     logging.error("Phi-3.5 did not return any response.")
-#                     return "No response generated.", []
-
-#         elif retrieval_mode == "KGF":
-#             response = chain_neo4j.invoke({"question": message})
-#             return response, extract_addresses(response)
-#         else:
-#             return "Invalid retrieval mode selected.", []
-
-#     except Exception as e:
-#         logging.error(f"Error in generate_answer: {e}")
-#         return "Sorry, I encountered an error while processing your request.", []
-
-
-
-# def generate_answer(message, choice, retrieval_mode, selected_model):
-#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
-
-#     try:
-#         # Handle hotel-related queries
-#         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_hotels()
-#             return response, extract_addresses(response)
-
-#         # Handle restaurant-related queries
-#         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_yelp_restaurants()
-#             return response, extract_addresses(response)
-
-#         # Handle flight-related queries
-#         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_flights()
-#             return response, extract_addresses(response)
-
-#         if retrieval_mode == "VDB":
-#             if selected_model == chat_model:
-#                 retriever = gpt_retriever
-#                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = prompt_template.format(context=context, question=message)
-
-#                 qa_chain = RetrievalQA.from_chain_type(
-#                     llm=chat_model,
-#                     chain_type="stuff",
-#                     retriever=retriever,
-#                     chain_type_kwargs={"prompt": prompt_template}
-#                 )
-#                 response = qa_chain({"query": message})
-#                 return response['result'], extract_addresses(response['result'])
-
-#             elif selected_model == phi_pipe:
-#                 retriever = phi_retriever
-#                 context_documents = retriever.get_relevant_documents(message)
-#                 context = "\n".join([doc.page_content for doc in context_documents])
-
-#                 prompt = phi_short_template.format(context=context, question=message)
-
-#                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
-
-#                 response = selected_model(prompt, **{
-#                     "max_new_tokens": 160,  # Increased to handle longer responses
-#                     "return_full_text": True,
-#                     "temperature": 0.7,  # Adjusted to avoid cutting off
-#                     "do_sample": True,  # Allow sampling to increase response diversity
-#                 })
-
-#                 if response:
-#                     generated_text = response[0]['generated_text']
-#                     logging.debug(f"Phi-3.5 Response: {generated_text}")
-#                     cleaned_response = clean_response(generated_text)
-#                     return cleaned_response, extract_addresses(cleaned_response)
-#                 else:
-#                     logging.error("Phi-3.5 did not return any response.")
-#                     return "No response generated.", []
-
-#         elif retrieval_mode == "KGF":
-#             response = chain_neo4j.invoke({"question": message})
-#             return response, extract_addresses(response)
-#         else:
-#             return "Invalid retrieval mode selected.", []
-
-#     except Exception as e:
-#         logging.error(f"Error in generate_answer: {e}")
-#         return "Sorry, I encountered an error while processing your request.", []
-
-
-
-# import re
-
-# def clean_response(response_text):
-#     # Remove any metadata-like information and focus on the main content
-#     # Removes "Document(metadata=...)" and other similar patterns
-#     cleaned_response = re.sub(r'Document\(metadata=.*?\),?\s*', '', response_text, flags=re.DOTALL)
-#     cleaned_response = re.sub(r'page_content=".*?"\),?', '', cleaned_response, flags=re.DOTALL)
-#     cleaned_response = re.sub(r'\[.*?\]', '', cleaned_response, flags=re.DOTALL)  # Remove content in brackets
-#     cleaned_response = re.sub(r'\s+', ' ', cleaned_response).strip()
-#     # Remove any unwanted follow-up questions or unnecessary text
-#     cleaned_response = re.sub(r'Question:.*\nAnswer:', '', cleaned_response, flags=re.DOTALL).strip()
-#     return cleaned_response
-
-
-
-
-# Define the custom template for Phi-3.5
-phi_custom_template = """
-<|system|>
-You are a helpful assistant.<|end|>
-<|user|>
-{context}
-{question}<|end|>
-<|assistant|>
-"""
 # import traceback
 
 # def generate_answer(message, choice, retrieval_mode, selected_model):
@@ -661,60 +416,44 @@ You are a helpful assistant.<|end|>
 
 
 
-# def bot(history, choice, tts_choice, retrieval_mode, model_choice):
-#     if not history:
-#         return history
+def bot(history, choice, tts_choice, retrieval_mode, model_choice):
+    if not history:
+        return history
 
-#     # Select the model
-#     selected_model = chat_model if model_choice == "GPT-4o" else phi_pipe
+    # Select the model
+    selected_model = chat_model if model_choice == "GPT-4o" else phi_pipe
 
-#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-#     history[-1][1] = ""
+    response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
+    history[-1][1] = ""
 
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         # elif tts_choice == "Gamma":
-#         #     audio_future = executor.submit(generate_audio_mars5, response)
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        if tts_choice == "Alpha":
+            audio_future = executor.submit(generate_audio_elevenlabs, response)
+        elif tts_choice == "Beta":
+            audio_future = executor.submit(generate_audio_parler_tts, response)
+        # elif tts_choice == "Gamma":
+        #     audio_future = executor.submit(generate_audio_mars5, response)
 
-#         for character in response:
-#             history[-1][1] += character
-#             time.sleep(0.05)
-#             yield history, None
+        for character in response:
+            history[-1][1] += character
+            time.sleep(0.05)
+            yield history, None
 
-#         audio_path = audio_future.result()
-#         yield history, audio_path
+        audio_path = audio_future.result()
+        yield history, audio_path
 
-#     history.append([response, None])
+    history.append([response, None])
 
 
-# phi_custom_template = """
-# <|system|>
-# You are a helpful assistant who provides clear, organized, and conversational responses.<|end|>
-# <|user|>
-# {context}
-# Question: {question}<|end|>
-# <|assistant|>
-# Sure! Here's the information you requested:
-# """
-import re
-
-def clean_response(response_text):
-    # Remove system and user tags
-    response_text = re.sub(r'<\|system\|>.*?<\|end\|>', '', response_text, flags=re.DOTALL)
-    response_text = re.sub(r'<\|user\|>.*?<\|end\|>', '', response_text, flags=re.DOTALL)
-    response_text = re.sub(r'<\|assistant\|>', '', response_text, flags=re.DOTALL)
-
-    # Clean up the text by removing extra whitespace
-    cleaned_response = response_text.strip()
-    cleaned_response = re.sub(r'\s+', ' ', cleaned_response)
-
-    # Ensure the response is conversational and organized
-    cleaned_response = cleaned_response.replace('1.', '\n1.').replace('2.', '\n2.').replace('3.', '\n3.').replace('4.', '\n4.').replace('5.', '\n5.')
-
-    return cleaned_response
+phi_custom_template = """
+<|system|>
+You are a helpful assistant who provides clear, organized, and conversational responses.<|end|>
+<|user|>
+{context}
+Question: {question}<|end|>
+<|assistant|>
+Sure! Here's the information you requested:
+"""
 
 
 import traceback
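
The `bot` function added in this commit is a generator: it streams the growing chat history character by character, then yields the history one last time together with the path of the TTS audio produced in a background thread. A minimal sketch of that contract is shown below. It is not part of the commit; the sample question, the `"Details"`/`"Alpha"`/`"VDB"`/`"GPT-4o"` choices, and the functions `generate_answer`, `chat_model`, `phi_pipe`, and the TTS helpers are assumed to be defined earlier in app.py, as the diff suggests.

```python
# Illustrative sketch only -- not code from this commit.
# Assumes bot(), generate_answer(), chat_model / phi_pipe and the TTS helpers
# are already defined in app.py.
history = [["What are the top attractions in Birmingham?", None]]  # made-up query

for updated_history, audio_path in bot(history, "Details", "Alpha", "VDB", "GPT-4o"):
    partial_answer = updated_history[-1][1]          # grows one character per yield
    if audio_path is None:
        print(partial_answer, end="\r", flush=True)  # streaming text updates
    else:
        print(f"\nTTS audio ready at: {audio_path}") # final yield carries the audio file
```

In the Gradio app these two yielded values would presumably be wired to a Chatbot component and an Audio component, which is what makes the character-by-character yields render as a typing effect.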
 
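The uncommented `phi_custom_template` added in this commit follows the Phi-3.5 chat markup (`<|system|>`, `<|user|>`, `<|assistant|>`, `<|end|>`). The sketch below shows, under stated assumptions, how such a template is typically filled with `.format()` and how the chat markers can be stripped back out of a completion that echoes the prompt; the `strip_chat_tags` helper and the sample context/question are illustrative and not from this commit, though the `clean_response` function removed here did essentially the same tag stripping.

```python
import re

# Same template text as the one added to app.py in this commit.
phi_custom_template = """
<|system|>
You are a helpful assistant who provides clear, organized, and conversational responses.<|end|>
<|user|>
{context}
Question: {question}<|end|>
<|assistant|>
Sure! Here's the information you requested:
"""

# Hypothetical helper (not in the commit), mirroring the removed clean_response:
# drop the Phi-3.5 chat markers and collapse whitespace.
def strip_chat_tags(text: str) -> str:
    text = re.sub(r"<\|(?:system|user|assistant|end)\|>", "", text)
    return re.sub(r"\s+", " ", text).strip()

# Example fill; the context and question values are made up for illustration.
prompt = phi_custom_template.format(
    context="Vulcan Park and Museum overlooks downtown Birmingham.",
    question="What can I see at Vulcan Park?",
)
print(strip_chat_tags(prompt))
```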