Ali2206 committed on
Commit
5226240
·
verified ·
1 Parent(s): e63e4a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -5
app.py CHANGED
@@ -1,4 +1,8 @@
1
-
 
 
 
 
2
  import gradio as gr
3
  from typing import List
4
  from concurrent.futures import ThreadPoolExecutor, as_completed
@@ -164,6 +168,8 @@ Medical Records:
164
 
165
  try:
166
  response = ""
 
 
167
  for chunk in agent.run_gradio_chat(
168
  message=prompt,
169
  history=[],
@@ -173,21 +179,26 @@ Medical Records:
173
  call_agent=False,
174
  conversation=[]
175
  ):
 
 
176
  if isinstance(chunk, str):
177
  response += chunk
178
  elif isinstance(chunk, list):
179
- response += "".join([c.content for c in chunk if hasattr(c, "content") and c.content])
 
 
 
180
 
181
- clean_response = response.split("[TOOL_CALLS]")[0].strip()
182
  if not clean_response:
183
- clean_response = "⚠️ No clear oversights identified or an error occurred."
184
 
185
  history[-1] = {"role": "assistant", "content": clean_response}
186
  report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
187
  yield history, report_path if report_path and os.path.exists(report_path) else None
188
 
189
  except Exception as e:
190
- print("ERROR:", str(e))
191
  history[-1] = {"role": "assistant", "content": f"❌ An error occurred: {str(e)}"}
192
  yield history, None
193
 
 
1
+ import sys
2
+ import os
3
+ import pandas as pd
4
+ import pdfplumber
5
+ import json
6
  import gradio as gr
7
  from typing import List
8
  from concurrent.futures import ThreadPoolExecutor, as_completed
 
168
 
169
  try:
170
  response = ""
171
+ finish_detected = False
172
+
173
  for chunk in agent.run_gradio_chat(
174
  message=prompt,
175
  history=[],
 
179
  call_agent=False,
180
  conversation=[]
181
  ):
182
+ if chunk is None:
183
+ continue
184
  if isinstance(chunk, str):
185
  response += chunk
186
  elif isinstance(chunk, list):
187
+ chunk_str = "".join([c.content for c in chunk if hasattr(c, "content") and c.content])
188
+ response += chunk_str
189
+ if '"name": "Finish"' in chunk_str:
190
+ finish_detected = True
191
 
192
+ clean_response = response.rsplit("[TOOL_CALLS]", 1)[0].strip()
193
  if not clean_response:
194
+ clean_response = "⚠️ No clear oversights identified or model output was invalid."
195
 
196
  history[-1] = {"role": "assistant", "content": clean_response}
197
  report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
198
  yield history, report_path if report_path and os.path.exists(report_path) else None
199
 
200
  except Exception as e:
201
+ print("ERROR:", str(e))
202
  history[-1] = {"role": "assistant", "content": f"❌ An error occurred: {str(e)}"}
203
  yield history, None
204