thechaiexperiment committed on
Commit
cbc773f
·
1 Parent(s): d274691

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -393,7 +393,7 @@ def generate_answer(prompt, max_length=860, temperature=0.2):
393
  model_f = models['llm_model']
394
  inputs = tokenizer_f(prompt, return_tensors="pt", truncation=True)
395
  # Start timing
396
- start_time = time.time()
397
  # Generate the output
398
  output_ids = model_f.generate(
399
  inputs.input_ids,
@@ -403,9 +403,9 @@ def generate_answer(prompt, max_length=860, temperature=0.2):
403
  pad_token_id=tokenizer_f.eos_token_id
404
  )
405
  # End timing
406
- end_time = time.time()
407
  # Calculate the duration
408
- duration = end_time - start_time
409
  # Decode the answer
410
  answer = tokenizer_f.decode(output_ids[0], skip_special_tokens=True)
411
  # Extract keywords from the passage and answer
@@ -413,9 +413,9 @@ def generate_answer(prompt, max_length=860, temperature=0.2):
413
  answer_keywords = set(answer.lower().split())
414
  # Verify if the answer aligns with the passage
415
  if passage_keywords.intersection(answer_keywords):
416
- return answer, duration
417
  else:
418
- return "Sorry, I can't help with that.", duration
419
 
420
  def remove_answer_prefix(text):
421
  prefix = "Answer:"
@@ -477,8 +477,8 @@ entities = extract_entities(query_text)
477
  passage = enhance_passage_with_entities(combined_parts, entities)
478
  # Generate answer with the enhanced passage
479
  prompt = create_prompt(query_text, passage)
480
- answer, generation_time = generate_answer(prompt)
481
- print(f"\nTime taken to generate the answer: {generation_time:.2f} seconds")
482
  answer_part = answer.split("Answer:")[-1].strip()
483
  cleaned_answer = remove_answer_prefix(answer_part)
484
  final_answer = remove_incomplete_sentence(cleaned_answer)
 
393
  model_f = models['llm_model']
394
  inputs = tokenizer_f(prompt, return_tensors="pt", truncation=True)
395
  # Start timing
396
+ #start_time = time.time()
397
  # Generate the output
398
  output_ids = model_f.generate(
399
  inputs.input_ids,
 
403
  pad_token_id=tokenizer_f.eos_token_id
404
  )
405
  # End timing
406
+ #end_time = time.time()
407
  # Calculate the duration
408
+ #duration = end_time - start_time
409
  # Decode the answer
410
  answer = tokenizer_f.decode(output_ids[0], skip_special_tokens=True)
411
  # Extract keywords from the passage and answer
 
413
  answer_keywords = set(answer.lower().split())
414
  # Verify if the answer aligns with the passage
415
  if passage_keywords.intersection(answer_keywords):
416
+ return answer #, duration
417
  else:
418
+ return "Sorry, I can't help with that." #, duration
419
 
420
  def remove_answer_prefix(text):
421
  prefix = "Answer:"
 
477
  passage = enhance_passage_with_entities(combined_parts, entities)
478
  # Generate answer with the enhanced passage
479
  prompt = create_prompt(query_text, passage)
480
+ answer = generate_answer(prompt)
481
+ #print(f"\nTime taken to generate the answer: {generation_time:.2f} seconds")
482
  answer_part = answer.split("Answer:")[-1].strip()
483
  cleaned_answer = remove_answer_prefix(answer_part)
484
  final_answer = remove_incomplete_sentence(cleaned_answer)