Ali2206 committed
Commit b4fa34c · verified · 1 Parent(s): 34a564f

Update app.py

Files changed (1)
  1. app.py +45 -61
app.py CHANGED
@@ -1,4 +1,4 @@
-# (Full Updated Code Snippet with Proper Final Response Handling)
+# Full updated app.py with TOOL_CALLS displayed separately and full fixes
 
 import sys
 import os
@@ -23,6 +23,7 @@ if not torch.cuda.is_available():
 
 persistent_dir = "/data/hf_cache"
 os.makedirs(persistent_dir, exist_ok=True)
+
 model_cache_dir = os.path.join(persistent_dir, "txagent_models")
 tool_cache_dir = os.path.join(persistent_dir, "tool_cache")
 file_cache_dir = os.path.join(persistent_dir, "cache")
@@ -67,10 +68,9 @@ def extract_priority_pages(file_path: str, max_pages: int = 20) -> str:
             text_chunks.append(f"=== Page {i} ===\n{page_text.strip()}")
         return "\n\n".join(text_chunks)
     except Exception as e:
-        debug_msg = f"PDF processing error: {str(e)}"
-        print(debug_msg)
+        print("PDF processing error:", str(e))
         traceback.print_exc()
-        return debug_msg
+        return str(e)
 
 def convert_file_to_json(file_path: str, file_type: str) -> str:
     try:
@@ -84,8 +84,7 @@ def convert_file_to_json(file_path: str, file_type: str) -> str:
             text = extract_priority_pages(file_path)
             result = json.dumps({"filename": os.path.basename(file_path), "content": text, "status": "initial"})
         elif file_type == "csv":
-            df = pd.read_csv(file_path, encoding_errors="replace", header=None, dtype=str,
-                             skip_blank_lines=False, on_bad_lines="skip")
+            df = pd.read_csv(file_path, encoding_errors="replace", header=None, dtype=str, skip_blank_lines=False, on_bad_lines="skip")
             content = df.fillna("").astype(str).values.tolist()
             result = json.dumps({"filename": os.path.basename(file_path), "rows": content})
         elif file_type in ["xls", "xlsx"]:
@@ -97,14 +96,14 @@ def convert_file_to_json(file_path: str, file_type: str) -> str:
             result = json.dumps({"filename": os.path.basename(file_path), "rows": content})
         else:
             result = json.dumps({"error": f"Unsupported file type: {file_type}"})
+
         with open(cache_path, "w", encoding="utf-8") as f:
             f.write(result)
         return result
     except Exception as e:
-        error_msg = f"Error processing {os.path.basename(file_path)}: {str(e)}"
-        print(error_msg)
+        print("Error processing", file_path, str(e))
         traceback.print_exc()
-        return json.dumps({"error": error_msg})
+        return json.dumps({"error": str(e)})
 
 def log_system_usage(tag=""):
     try:
@@ -124,7 +123,7 @@ def log_system_usage(tag=""):
 
 def init_agent():
     try:
-        print("\U0001F501 Initializing model...")
+        print("🔁 Initializing model...")
         log_system_usage("Before Load")
         default_tool_path = os.path.abspath("data/new_tool.json")
         target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
@@ -173,14 +172,14 @@ def create_ui(agent):
             results = []
             for future in as_completed(futures):
                 try:
-                    results.append(sanitize_utf8(future.result()))
+                    res = future.result()
+                    results.append(sanitize_utf8(res))
                 except Exception as e:
                     print("❌ Error in file processing:", str(e))
                     traceback.print_exc()
-            extracted = "\n".join(results)
-            file_hash_value = file_hash(files[0].name)
+            extracted = "\n".join(results)
+            file_hash_value = file_hash(files[0].name)
 
-            max_content_length = 8000
             prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
 1. List potential missed diagnoses
 2. Flag any medication conflicts
@@ -188,13 +187,16 @@
 4. Highlight abnormal results needing follow-up
 
 Medical Records:
-{extracted[:max_content_length]}
+{extracted[:8000]}
 
 ### Potential Oversights:
 """
+            print("🔎 Generated prompt:")
+            print(prompt)
 
             full_response = ""
             response_chunks = []
+            tool_calls_rendered = []
 
             for chunk in agent.run_gradio_chat(
                 message=prompt,
@@ -205,55 +207,39 @@ Medical Records:
                 call_agent=False,
                 conversation=[]
             ):
-                try:
-                    chunk_content = ""
-                    if isinstance(chunk, str):
-                        chunk_content = chunk
-                    elif hasattr(chunk, 'content'):
-                        chunk_content = chunk.content
-                    elif isinstance(chunk, list):
-                        chunk_content = "".join([c.content for c in chunk if hasattr(c, "content") and c.content])
-
-                    if not chunk_content:
-                        continue
-
-                    response_chunks.append(chunk_content)
-                    full_response = "".join(response_chunks)
-
-                    display_response = re.split(r"\\[TOOL_CALLS\\].*?$", full_response, flags=re.DOTALL)[0].strip()
-                    display_response = display_response.replace('[TxAgent]', '').strip()
-
-                    if len(history) > 1 and history[-2]["role"] == "assistant" and history[-2]["content"] == display_response:
-                        pass
-                    else:
-                        if len(history) > 0 and history[-1]["role"] == "assistant":
-                            history[-1]["content"] = display_response
-                        else:
-                            history.append({"role": "assistant", "content": display_response})
-
-                    yield history, None
-                except Exception as e:
-                    print("❌ Error processing chunk:", str(e))
-                    traceback.print_exc()
+                if chunk is None:
+                    continue
+                chunk_content = chunk if isinstance(chunk, str) else getattr(chunk, 'content', '')
+                if not chunk_content:
                     continue
+                response_chunks.append(chunk_content)
+                full_response = "".join(response_chunks)
 
-            if not full_response:
-                full_response = "⚠️ No clear oversights identified or model output was invalid."
-            else:
-                full_response = re.split(r"\\[TOOL_CALLS\\].*?$", full_response, flags=re.DOTALL)[0].strip()
-                full_response = full_response.replace('[TxAgent]', '').strip()
+                matches = re.findall(r"\[TOOL_CALLS\]\[(.*?)\]", chunk_content, re.DOTALL)
+                for m in matches:
+                    tool_calls_rendered.append(f"\n📦 Tool Call: [{m.strip()}]")
+
+                display_response = re.sub(r"\[TOOL_CALLS\].*?\n*", "", full_response, flags=re.DOTALL)
+                display_response = display_response.replace('[TxAgent]', '').strip()
+                display_response += "\n\n" + "\n".join(tool_calls_rendered)
+
+                if history and history[-1]["role"] == "assistant":
+                    history[-1]["content"] = display_response
+                else:
+                    history.append({"role": "assistant", "content": display_response})
+
+                yield history, None
+
+            full_response = re.sub(r"\[TOOL_CALLS\].*?\n*", "", full_response, flags=re.DOTALL).strip()
+            full_response = full_response.replace('[TxAgent]', '').strip()
 
             report_path = None
             if file_hash_value:
                 report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt")
-                try:
-                    with open(report_path, "w", encoding="utf-8") as f:
-                        f.write(full_response)
-                except Exception as e:
-                    print("❌ Error saving report:", str(e))
-                    traceback.print_exc()
-
-            if len(history) > 0 and history[-1]["role"] == "assistant":
+                with open(report_path, "w", encoding="utf-8") as f:
+                    f.write(full_response)
+
+            if history and history[-1]["role"] == "assistant":
                 history[-1]["content"] = full_response
             else:
                 history.append({"role": "assistant", "content": full_response})
@@ -261,10 +247,8 @@ Medical Records:
             yield history, report_path if report_path and os.path.exists(report_path) else None
 
         except Exception as e:
-            error_message = f"❌ An error occurred in analyze: {str(e)}"
-            print(error_message)
+            history.append({"role": "assistant", "content": f"❌ An error occurred in analyze: {str(e)}"})
             traceback.print_exc()
-            history.append({"role": "assistant", "content": error_message})
             yield history, None
 
     send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
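A note on the central fix in this commit: in the old cleanup pattern r"\\[TOOL_CALLS\\]", the doubled backslashes become a literal "\" in the regex, and [TOOL_CALLS] then reads as a character class matching a single character, so the bare marker [TOOL_CALLS] in the model output never matched and was never stripped. The new patterns escape the brackets instead. A minimal standalone sketch of the difference (the sample string is invented for illustration):

import re

sample = 'Possible anemia.\n[TOOL_CALLS][{"name": "lab_lookup"}]'

# Old pattern: requires a literal backslash in the text, so nothing is split off.
print(re.split(r"\\[TOOL_CALLS\\].*?$", sample, flags=re.DOTALL)[0] == sample)  # True

# New patterns: capture the tool-call payload, then strip the marker for display.
print(re.findall(r"\[TOOL_CALLS\]\[(.*?)\]", sample, re.DOTALL))  # ['{"name": "lab_lookup"}']
print(re.sub(r"\[TOOL_CALLS\].*?\n*", "", sample, flags=re.DOTALL))  # marker removed

In the substitution the lazy .*? matches nothing before \n*, so only the marker itself (plus any newlines directly after it) is removed; the bracketed payload is what re.findall captures and the loop re-renders as "📦 Tool Call" lines.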
 
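The rewritten streaming loop also collapses the old four-branch isinstance chain into a single expression: chunk if isinstance(chunk, str) else getattr(chunk, 'content', ''). Strings pass through, chunk objects contribute their .content, and anything else falls back to '' and is skipped by the if not chunk_content guard. A small sketch of that behavior, with a hypothetical Chunk class standing in for whatever agent.run_gradio_chat actually yields:

from dataclasses import dataclass

@dataclass
class Chunk:
    # Hypothetical stand-in for the agent's chunk objects.
    content: str

def normalize(chunk):
    # Same logic as the new loop body.
    if chunk is None:
        return ""
    return chunk if isinstance(chunk, str) else getattr(chunk, 'content', '')

print(normalize("plain text"))        # 'plain text'
print(normalize(Chunk("obj text")))   # 'obj text'
print(normalize(object()))            # '' -> skipped by the guard
print(normalize([Chunk("a")]))        # '' -> lists no longer contribute content

One behavioral difference worth noting: the old code joined the .content of list-valued chunks, while the new expression maps lists to '' and drops them.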
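Finally, both the per-chunk yield and the final flush now use the same upsert idiom on the Gradio messages-format history (a list of {"role": ..., "content": ...} dicts): overwrite the trailing assistant turn while streaming, and append a new turn only when the last entry is not an assistant message, so the chatbot shows one growing bubble rather than one bubble per chunk. A minimal sketch of the idiom (upsert_assistant is a made-up helper name, not something in app.py):

def upsert_assistant(history, text):
    # Same branch the diff applies after each chunk and again at the end.
    if history and history[-1]["role"] == "assistant":
        history[-1]["content"] = text
    else:
        history.append({"role": "assistant", "content": text})

history = [{"role": "user", "content": "Review my records"}]
for partial in ["Poss", "Possible an", "Possible anemia."]:
    upsert_assistant(history, partial)

print(history[-1])  # {'role': 'assistant', 'content': 'Possible anemia.'}

The rewrite also swaps the old len(history) > 0 checks for plain truthiness tests and drops the old len(history) > 1 duplicate-content comparison.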