Shreyas094 committed on
Commit
84663a3
·
verified ·
1 Parent(s): 3d30d16

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -233,10 +233,11 @@ def generate_chunked_response(model, prompt, max_tokens=1000, max_chunks=5):
233
  full_response += chunk
234
  except Exception as e:
235
  print(f"Error in generate_chunked_response: {e}")
 
 
236
  if "Input validation error" in str(e):
237
- # If we hit the token limit, return what we have so far
238
  return full_response if full_response else "The input was too long to process. Please try a shorter query."
239
- break
240
  return full_response.strip()
241
 
242
  def extract_text_from_webpage(html):
@@ -515,6 +516,11 @@ def extract_answer(full_response, instructions=None):
515
  full_response = '\n'.join(lines)
516
 
517
  return full_response.strip()
 
 
 
 
 
518
 
519
  # Gradio interface
520
  with gr.Blocks() as demo:
 
233
  full_response += chunk
234
  except Exception as e:
235
  print(f"Error in generate_chunked_response: {e}")
236
+ print(f"Prompt: {prompt}")
237
+ print(f"Full response so far: {full_response}")
238
  if "Input validation error" in str(e):
 
239
  return full_response if full_response else "The input was too long to process. Please try a shorter query."
240
+ raise # Re-raise the exception to be caught in ask_question
241
  return full_response.strip()
242
 
243
  def extract_text_from_webpage(html):
 
516
  full_response = '\n'.join(lines)
517
 
518
  return full_response.strip()
519
+ except Exception as e:
520
+ print(f"Error in extract_answer: {e}")
521
+ print(f"Full response: {full_response}")
522
+ print(f"Instructions: {instructions}")
523
+ raise # Re-raise the exception to be caught in ask_question
524
 
525
  # Gradio interface
526
  with gr.Blocks() as demo: