Update app.py
app.py CHANGED
@@ -194,6 +194,7 @@ def init_agent():
         step_rag_num=4,
         seed=100,
         additional_default_tools=[],
+        dtype=torch.float16,  # Enable mixed precision
     )

 def preload_models():
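The new `dtype=torch.float16` argument switches the agent's model to half-precision weights. A minimal sketch of what that typically maps to, assuming `init_agent()` wraps a Hugging Face `transformers` load (the checkpoint id below is a placeholder, not the app's actual model):

```python
# Sketch only: the app's real loader lives inside init_agent().
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "some-org/clinical-llm",    # placeholder model id
    torch_dtype=torch.float16,  # store weights in fp16: roughly half the GPU memory
    device_map="auto",          # requires `accelerate`; places layers on available devices
)
```

fp16 also speeds up inference on tensor-core GPUs; where numeric overflow is a concern, `torch.bfloat16` is the safer variant.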
@@ -214,25 +215,24 @@ def create_ui(agent):
         msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
         send_btn = gr.Button("Analyze", variant="primary")
         download_output = gr.File(label="Download Full Report")
-        progress_bar = gr.Progress()
+        progress_bar = gr.Progress()

         prompt_template = """
 Analyze the patient record excerpt for clinical oversights. Provide a concise, evidence-based summary in markdown with findings grouped under tool-derived headings (e.g., 'Drugs'). For each finding, include clinical context, risks, and recommendations. Precede findings with a tool tag (e.g., [TOOL: get_abuse_info_by_drug_name]). Output only markdown bullet points under headings. If no issues, state "No issues identified".
-
 Patient Record Excerpt (Chunk {0} of {1}):
 {chunk}
 """

         def analyze(message: str, history: List[dict], files: List, progress=gr.Progress()):
-            history.append({"role": "user", "content": message})
-            yield history, None
+            history.append({"role": "user", "content": message})
+            yield history, None, None

             extracted = ""
             file_hash_value = ""
             if files:
                 def update_extraction_progress(current, total):
                     progress(current / total, desc=f"Extracting text... Page {current}/{total}")
-                    return history, None
+                    return history, None, None

                 with ThreadPoolExecutor(max_workers=6) as executor:
                     futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower(), update_extraction_progress) for f in files]
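Because `analyze` is a generator, every `yield` streams an interim state to the wired output components, while the `progress=gr.Progress()` default injects a tracker that Gradio renders on the tracked output as the event runs. A self-contained sketch of that pattern, assuming Gradio 4.x (the component names and the fake work loop are illustrative):

```python
import time
from pathlib import Path
import gradio as gr

def analyze(message, history, progress=gr.Progress()):
    history = history + [{"role": "user", "content": message}]
    yield history, None                        # stream the user turn immediately
    for _ in progress.tqdm(range(3), desc="Analyzing..."):
        time.sleep(0.2)                        # stand-in for per-chunk agent calls
    report = Path("report.md")
    report.write_text("## Findings\n- none\n", encoding="utf-8")
    history = history + [{"role": "assistant", "content": "✅ Done."}]
    yield history, str(report)                 # second output feeds the gr.File

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(show_label=False)
    report_file = gr.File(label="Download Full Report")
    msg.submit(analyze, inputs=[msg, chatbot], outputs=[chatbot, report_file])

demo.queue().launch()  # generator callbacks stream their updates through the queue
```

Note that the tracker is driven through the injected `progress` parameter, not through a yielded value; the extra `None` in each yield here simply fills the third wired output slot.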
@@ -241,7 +241,7 @@ Patient Record Excerpt (Chunk {0} of {1}):
                 file_hash_value = file_hash(files[0].name) if files else ""

                 history.append({"role": "assistant", "content": "✅ Text extraction complete."})
-                yield history, None
+                yield history, None, None

                 chunk_size = 6000
                 chunks = [extracted[i:i + chunk_size] for i in range(0, len(extracted), chunk_size)]
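The extracted text is sliced into fixed 6000-character chunks, and each chunk is spliced into the prompt; `str.format` accepts the template's mix of positional `{0}`/`{1}` fields and the named `{chunk}` field in one call. A quick worked example:

```python
extracted = "x" * 15000
chunk_size = 6000
chunks = [extracted[i:i + chunk_size] for i in range(0, len(extracted), chunk_size)]
assert [len(c) for c in chunks] == [6000, 6000, 3000]  # last chunk is the remainder

template = "Patient Record Excerpt (Chunk {0} of {1}):\n{chunk}"
prompts = [template.format(i, len(chunks), chunk=c) for i, c in enumerate(chunks, start=1)]
assert prompts[0].startswith("Patient Record Excerpt (Chunk 1 of 3):")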
@@ -281,7 +281,7 @@ Patient Record Excerpt (Chunk {0} of {1}):
                     else:
                         combined_response += f"--- Analysis for Chunk {chunk_idx} ---\nNo oversights identified for this chunk.\n\n"
                     history[-1] = {"role": "assistant", "content": combined_response.strip()}
-                    yield history, None
+                    yield history, None, None

                 if combined_response.strip() and not all("No oversights identified" in chunk for chunk in combined_response.split("--- Analysis for Chunk")):
                     history[-1]["content"] = combined_response.strip()
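One detail worth knowing about the `all(...)` guard above: `str.split` returns the text before the first separator as an (often empty) first element, which never contains the no-oversights marker, so the check only behaves as intended if empty sections are filtered out first. A sketch under that assumption:

```python
combined_response = (
    "--- Analysis for Chunk 1 ---\nNo oversights identified for this chunk.\n\n"
    "--- Analysis for Chunk 2 ---\nNo oversights identified for this chunk.\n\n"
)
raw = combined_response.split("--- Analysis for Chunk")
assert raw[0] == ""                      # leading empty piece produced by split()
sections = [s for s in raw if s.strip()]
all_clear = all("No oversights identified" in s for s in sections)
assert all_clear                         # with filtering, both chunks report clean
```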
@@ -292,29 +292,25 @@ Patient Record Excerpt (Chunk {0} of {1}):
                 if report_path:
                     with open(report_path, "w", encoding="utf-8") as f:
                         f.write(combined_response)
-                yield history, report_path if report_path and os.path.exists(report_path) else None
+                yield history, report_path if report_path and os.path.exists(report_path) else None, None

             except Exception as e:
                 print("🚨 ERROR:", e)
                 history.append({"role": "assistant", "content": f"❌ Error occurred: {str(e)}"})
-                yield history, None
+                yield history, None, None

-        send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
-        msg_input.submit(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
+        send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output, progress_bar])
+        msg_input.submit(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output, progress_bar])
         return demo

 if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
-    )
-    finally:
-        if torch.distributed.is_initialized():
-            torch.distributed.destroy_process_group()  # Clean up distributed resources
+    print("🚀 Launching app...")
+    agent = init_agent()
+    demo = create_ui(agent)
+    demo.queue(api_open=False).launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        show_error=True,
+        allowed_paths=[report_dir],
+        share=False
+    )
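The new `__main__` block is mostly standard Gradio serving; the one setting tied to the rest of the change is `allowed_paths`, which lets `gr.File` serve the generated report from a directory Gradio would otherwise block. A runnable sketch, assuming `report_dir` is the reports directory the app defines elsewhere:

```python
import os
import gradio as gr

report_dir = os.path.join(os.getcwd(), "reports")  # assumption: defined elsewhere in app.py
os.makedirs(report_dir, exist_ok=True)

with gr.Blocks() as demo:
    gr.Markdown("UI placeholder")

demo.queue(api_open=False).launch(  # hide the auto-generated API page
    server_name="0.0.0.0",          # listen on all interfaces (Docker/Spaces)
    server_port=7860,
    show_error=True,                # surface tracebacks in the browser
    allowed_paths=[report_dir],     # files outside the allowed set cannot be downloaded
    share=False,
)
```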
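The removed `finally:` block tore down the `torch.distributed` process group on exit. Should that cleanup be needed again, a standalone version of the same guard looks like this (the `is_available()` check is an extra defensive assumption, not in the original):

```python
import torch.distributed as dist

def cleanup_distributed() -> None:
    # destroy_process_group() errors when no group was ever initialized,
    # hence the guard mirroring the removed finally block
    if dist.is_available() and dist.is_initialized():
        dist.destroy_process_group()
```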