Shreyas094 committed on
Commit
23a46aa
·
verified ·
1 Parent(s): 8f54b98

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -34
app.py CHANGED
@@ -477,24 +477,6 @@ def create_web_search_vectors(search_results):
477
 
478
  return FAISS.from_documents(documents, embed)
479
 
480
- def critique_response(response, context, query):
481
- critique_prompt = f"""Given the following response, original context, and user query, identify any statements that might be inaccurate, unsupported by the context, or irrelevant to the query. Be specific about which parts may be hallucinations or extrapolations beyond the given information.
482
-
483
- User Query: {query}
484
-
485
- Response:
486
- {response}
487
-
488
- Original Context:
489
- {context}
490
-
491
- Critique:"""
492
-
493
- client = InferenceClient(model, token=huggingface_token)
494
- critique = client.text_generation(critique_prompt, max_new_tokens=500, temperature=0.2)
495
-
496
- return critique
497
-
498
  def get_response_with_search(query, model, num_calls=3, temperature=0.1):
499
  search_results = duckduckgo_search(query)
500
  web_search_database = create_web_search_vectors(search_results)
@@ -513,15 +495,16 @@ def get_response_with_search(query, model, num_calls=3, temperature=0.1):
513
  Write a detailed and complete response that answers the following user query: '{query}'
514
  Stick closely to the information provided in the context and avoid making unsupported claims."""
515
 
516
- client = InferenceClient(model, token=huggingface_token)
517
-
518
- # Generate initial response
519
- initial_response = client.text_generation(initial_prompt, max_new_tokens=1000, temperature=temperature)
520
-
521
- # Generate critique
522
- critique = critique_response(initial_response, context, query)
523
-
524
- final_prompt = f"""Given the following initial response, context, critique, and original query, provide a revised response that addresses the identified issues and sticks closely to the information provided in the context while fully answering the user's query.
 
525
 
526
  User Query: {query}
527
 
@@ -536,14 +519,36 @@ Critique:
536
 
537
  Revised Response:"""
538
 
539
- # Generate final response
540
- for chunk in client.text_generation(final_prompt, max_new_tokens=1500, temperature=temperature, stream=True):
541
- yield chunk, ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
542
 
543
- # Add a disclaimer
544
- disclaimer = ("\nNote: This response was generated by an AI model based on web search results. "
545
- "While efforts have been made to ensure accuracy, please verify important information from authoritative sources.")
546
- yield disclaimer, ""
 
 
547
 
548
 
549
  INSTRUCTION_PROMPTS = {
 
477
 
478
  return FAISS.from_documents(documents, embed)
479
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
480
  def get_response_with_search(query, model, num_calls=3, temperature=0.1):
481
  search_results = duckduckgo_search(query)
482
  web_search_database = create_web_search_vectors(search_results)
 
495
  Write a detailed and complete response that answers the following user query: '{query}'
496
  Stick closely to the information provided in the context and avoid making unsupported claims."""
497
 
498
+ try:
499
+ client = InferenceClient(model, token=huggingface_token)
500
+
501
+ # Generate initial response
502
+ initial_response = client.text_generation(initial_prompt, max_new_tokens=1000, temperature=temperature)
503
+
504
+ # Generate critique
505
+ critique = critique_response(initial_response, context, query, model)
506
+
507
+ final_prompt = f"""Given the following initial response, context, critique, and original query, provide a revised response that addresses the identified issues and sticks closely to the information provided in the context while fully answering the user's query.
508
 
509
  User Query: {query}
510
 
 
519
 
520
  Revised Response:"""
521
 
522
+ # Generate final response
523
+ for chunk in client.text_generation(final_prompt, max_new_tokens=1500, temperature=temperature, stream=True):
524
+ yield chunk, ""
525
+
526
+ # Add a disclaimer
527
+ disclaimer = ("\nNote: This response was generated by an AI model based on web search results. "
528
+ "While efforts have been made to ensure accuracy, please verify important information from authoritative sources.")
529
+ yield disclaimer, ""
530
+
531
+ except Exception as e:
532
+ logging.error(f"Error in multi-step generation process: {str(e)}")
533
+ yield f"An error occurred during the response generation process: {str(e)}", ""
534
+
535
+ def critique_response(response, context, query, model):
536
+ critique_prompt = f"""Given the following response, original context, and user query, identify any statements that might be inaccurate, unsupported by the context, or irrelevant to the query. Be specific about which parts may be hallucinations or extrapolations beyond the given information.
537
+
538
+ User Query: {query}
539
+
540
+ Response:
541
+ {response}
542
+
543
+ Original Context:
544
+ {context}
545
 
546
+ Critique:"""
547
+
548
+ client = InferenceClient(model, token=huggingface_token)
549
+ critique = client.text_generation(critique_prompt, max_new_tokens=500, temperature=0.2)
550
+
551
+ return critique
552
 
553
 
554
  INSTRUCTION_PROMPTS = {