Trabis committed
Commit 8855231 · verified · 1 Parent(s): eeafa3f

Update app.py

Files changed (1)
  1. app.py +72 -72
app.py CHANGED
@@ -328,52 +328,17 @@ textarea::placeholder {
 }
 """
 
-# def process_question(question: str) -> Iterator[str]:
-#     """
-#     Process the question and return a response generator for streaming.
-#     """
-#     if question in question_cache:
-#         yield question_cache[question][0]
-#         return
-
-#     relevant_docs = retriever(question)
-#     context = "\n".join([doc.page_content for doc in relevant_docs])
-
-#     prompt = prompt_template.format_messages(
-#         context=context,
-#         question=question
-#     )
-
-#     full_response = ""
-#     try:
-#         for chunk in llm.stream(prompt):
-#             if isinstance(chunk, str):
-#                 current_chunk = chunk
-#             else:
-#                 current_chunk = chunk.content
-
-#             full_response += current_chunk
-#             yield full_response  # Send the updated response in streaming
-
-#         question_cache[question] = (full_response, context)
-#     except Exception as e:
-#         yield f"Erreur lors du traitement : {str(e)}"
-
 def process_question(question: str) -> Iterator[str]:
     """
-    Process the question and return a response generator for streaming, including sources.
+    Process the question and return a response generator for streaming.
     """
     if question in question_cache:
         yield question_cache[question][0]
         return
 
-    # Récupérer les documents pertinents
     relevant_docs = retriever(question)
     context = "\n".join([doc.page_content for doc in relevant_docs])
-    sources = [doc.metadata.get("source", "Source inconnue") for doc in relevant_docs]
-    sources = os.path.splitext(sources[0])[0] if sources else "غير معروف"
 
-    # Générer le prompt
     prompt = prompt_template.format_messages(
         context=context,
         question=question
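
Note on the dropped sources lines above: os.path.splitext splits a path into (root, extension), so the second removed assignment collapsed the whole sources list into a single extension-less name, with the Arabic string ("unknown") as fallback. A minimal sketch of that behavior, using a hypothetical metadata value:

    import os

    # Hypothetical doc.metadata value, mirroring the removed lines
    sources = [{"source": "guide_fiscal.pdf"}.get("source", "Source inconnue")]
    sources = os.path.splitext(sources[0])[0] if sources else "غير معروف"
    print(sources)  # -> "guide_fiscal" (now a single string, no longer a list)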
@@ -381,7 +346,6 @@ def process_question(question: str) -> Iterator[str]:
 
     full_response = ""
     try:
-        # Streaming de la réponse
         for chunk in llm.stream(prompt):
             if isinstance(chunk, str):
                 current_chunk = chunk
@@ -389,19 +353,55 @@
                 current_chunk = chunk.content
 
             full_response += current_chunk
-            yield full_response  # Envoyer la réponse mise à jour en streaming
-
-        # Ajouter les sources à la réponse finale
-        if sources:
-            sources_str = "\nSources :\n" + "\n".join(f"- {source}" for source in sources)
-            full_response += sources_str
-            yield sources_str  # Envoyer les sources
+            yield full_response  # Send the updated response in streaming
 
-        # Mettre en cache la réponse complète
         question_cache[question] = (full_response, context)
     except Exception as e:
         yield f"Erreur lors du traitement : {str(e)}"
 
+# def process_question(question: str) -> Iterator[str]:
+#     """
+#     Process the question and return a response generator for streaming, including sources.
+#     """
+#     if question in question_cache:
+#         yield question_cache[question][0]
+#         return
+
+#     # Récupérer les documents pertinents
+#     relevant_docs = retriever(question)
+#     context = "\n".join([doc.page_content for doc in relevant_docs])
+#     sources = [doc.metadata.get("source", "Source inconnue") for doc in relevant_docs]
+#     sources = os.path.splitext(sources[0])[0] if sources else "غير معروف"
+
+#     # Générer le prompt
+#     prompt = prompt_template.format_messages(
+#         context=context,
+#         question=question
+#     )
+
+#     full_response = ""
+#     try:
+#         # Streaming de la réponse
+#         for chunk in llm.stream(prompt):
+#             if isinstance(chunk, str):
+#                 current_chunk = chunk
+#             else:
+#                 current_chunk = chunk.content
+
+#             full_response += current_chunk
+#             yield full_response  # Envoyer la réponse mise à jour en streaming
+
+#         # Ajouter les sources à la réponse finale
+#         if sources:
+#             sources_str = "\nSources :\n" + "\n".join(f"- {source}" for source in sources)
+#             full_response += sources_str
+#             yield sources_str  # Envoyer les sources
+
+#         # Mettre en cache la réponse complète
+#         question_cache[question] = (full_response, context)
+#     except Exception as e:
+#         yield f"Erreur lors du traitement : {str(e)}"
+
 
 # def process_question(question: str) -> tuple[str, list[str]]:
 #     # Check cache first
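
Both the active and the commented-out variants yield the accumulated full_response rather than each delta, so a consumer can simply replace the displayed message on every update. A self-contained sketch of that pattern, with a stub chunk list standing in for llm.stream:

    from typing import Iterator

    def stream_cumulative(chunks: list[str]) -> Iterator[str]:
        # Yield the full text so far, not the increment, as process_question does
        full_response = ""
        for chunk in chunks:
            full_response += chunk
            yield full_response

    for partial in stream_cumulative(["Bon", "jour", " !"]):
        print(partial)  # "Bon", then "Bonjour", then "Bonjour !"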
@@ -440,44 +440,44 @@ def process_question(question: str) -> Iterator[str]:
 
 
 
-# def gradio_stream(question: str, chat_history: list) -> Iterator[list]:
-#     """
-#     Format the output for Gradio Chatbot component with streaming.
-#     """
-#     full_response = ""
-#     try:
-#         for partial_response in process_question(question):
-#             full_response = partial_response
-#             # Append the latest assistant response to chat history
-#             updated_chat = chat_history + [[question, partial_response]]
-#             yield updated_chat
-#     except Exception as e:
-#         # Handle errors during streaming
-#         updated_chat = chat_history + [[question, f"Erreur : {str(e)}"]]
-#         yield updated_chat
-
 def gradio_stream(question: str, chat_history: list) -> Iterator[list]:
     """
-    Format the output for Gradio Chatbot component with streaming, including sources.
+    Format the output for Gradio Chatbot component with streaming.
     """
     full_response = ""
-    sources_str = ""
     try:
         for partial_response in process_question(question):
-            if "Sources :" in partial_response:
-                # Les sources sont ajoutées à la réponse finale
-                sources_str = partial_response
-                updated_chat = chat_history + [[question, full_response + "\n" + sources_str]]
-            else:
-                # Construire progressivement la réponse
-                full_response = partial_response
-                updated_chat = chat_history + [[question, full_response]]
+            full_response = partial_response
+            # Append the latest assistant response to chat history
+            updated_chat = chat_history + [[question, partial_response]]
             yield updated_chat
     except Exception as e:
-        # Gestion des erreurs lors du streaming
+        # Handle errors during streaming
         updated_chat = chat_history + [[question, f"Erreur : {str(e)}"]]
         yield updated_chat
 
+# def gradio_stream(question: str, chat_history: list) -> Iterator[list]:
+#     """
+#     Format the output for Gradio Chatbot component with streaming, including sources.
+#     """
+#     full_response = ""
+#     sources_str = ""
+#     try:
+#         for partial_response in process_question(question):
+#             if "Sources :" in partial_response:
+#                 # Les sources sont ajoutées à la réponse finale
+#                 sources_str = partial_response
+#                 updated_chat = chat_history + [[question, full_response + "\n" + sources_str]]
+#             else:
+#                 # Construire progressivement la réponse
+#                 full_response = partial_response
+#                 updated_chat = chat_history + [[question, full_response]]
+#             yield updated_chat
+#     except Exception as e:
+#         # Gestion des erreurs lors du streaming
+#         updated_chat = chat_history + [[question, f"Erreur : {str(e)}"]]
+#         yield updated_chat
+
 
 # Gradio interface
 with gr.Blocks(css=css) as demo:
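
Because gradio_stream yields a complete, updated history list on each step, it can be bound directly to a Chatbot output and Gradio will re-render as values arrive. A hypothetical wiring sketch (the demo's actual layout sits below this hunk and is outside the diff; the component names here are assumptions):

    import gradio as gr

    with gr.Blocks(css=css) as demo:
        chatbot = gr.Chatbot()  # history as [question, answer] pairs
        question = gr.Textbox(placeholder="Posez votre question...")
        # Binding a generator lets Gradio stream each yielded history update
        question.submit(gradio_stream, inputs=[question, chatbot], outputs=chatbot)

    demo.launch()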
 