cvips committed
Commit 523cd54 · Parent(s): e13af0d

bomedllamv2 integrated

app.py CHANGED
@@ -269,7 +269,7 @@ def update_example_prompts(modality):
def process_image(image_path, user_prompt, modality=None):
    try:
        if not image_path:
-
+            return [], "Please upload an image", "No modality detected"

        image = read_rgb(image_path)
        pil_image = Image.fromarray(image)

@@ -282,47 +282,62 @@ def process_image(image_path, user_prompt, modality=None):

        llm_response = ""
        if llm_model and llm_tokenizer:
-
-
-
-
-
-
-
-
-
+            try:
+                for new_text in llm_model.chat(
+                    image=pil_image,
+                    msgs=msgs,
+                    tokenizer=llm_tokenizer,
+                    sampling=True,
+                    temperature=0.95,
+                    stream=True
+                ):
+                    llm_response += new_text
+            except Exception as e:
+                print(f"LLM chat error: {str(e)}")
+                llm_response = "LLM analysis failed. Proceeding with basic analysis."
        else:
            llm_response = "LLM not available. Please check LLM initialization logs."

        detected_modality = extract_modality_from_llm(llm_response)
        if not detected_modality:
-            # Fallback
-            detected_modality = "X-Ray-Chest"
+            detected_modality = "X-Ray-Chest"  # Fallback modality

        clinical_findings = extract_clinical_findings(llm_response, detected_modality)
        if not clinical_findings:
-
-            clinical_findings = [detected_modality.split("-")[-1]]
+            clinical_findings = [detected_modality.split("-")[-1].lower()]

        results = []
        analysis_results = []
        colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]

        for idx, finding in enumerate(clinical_findings):
-
-
-
-
-
-
-
-
-
-
-
-
-
+            try:
+                mask_list = interactive_infer_image(model, pil_image, [finding])
+                if not mask_list or len(mask_list) == 0:
+                    analysis_results.append(f"No mask generated for '{finding}'")
+                    continue
+
+                pred_mask = mask_list[0]
+                if pred_mask is None or not pred_mask.any():
+                    analysis_results.append(f"Empty mask generated for '{finding}'")
+                    continue
+
+                p_value = check_mask_stats(image, pred_mask.astype(np.uint8) * 255, detected_modality, finding)
+                analysis_results.append(f"P-value for '{finding}' ({detected_modality}): {p_value:.4f}")
+
+                overlay_image = image.copy()
+                color = colors[idx % len(colors)]
+                mask_indices = pred_mask > 0.5
+                if mask_indices.any():
+                    overlay_image[mask_indices] = color
+                results.append(overlay_image)
+            except Exception as e:
+                print(f"Error processing finding {finding}: {str(e)}")
+                analysis_results.append(f"Failed to process '{finding}': {str(e)}")

+        if not results:
+            results = [image]  # Return original image if no overlays were created
+
        enhanced_response = llm_response + "\n\nSegmentation Results:\n"
        for idx, finding in enumerate(clinical_findings):
            color_name = ["red", "green", "blue", "yellow", "magenta"][idx % len(colors)]

@@ -341,7 +356,7 @@ def process_image(image_path, user_prompt, modality=None):
    except Exception as e:
        error_msg = f"⚠️ An error occurred: {str(e)}"
        print(f"Error details: {str(e)}", flush=True)
-        return
+        return [image] if 'image' in locals() else [], error_msg, "Error detecting modality"

with gr.Blocks() as demo:
    gr.HTML(MARKDOWN)
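For reference, the overlay step this commit adds inside the per-finding loop is plain NumPy boolean masking. Below is a minimal standalone sketch of that pattern, using a synthetic image and a hypothetical demo_overlay helper (not part of app.py), assuming predicted masks are float arrays thresholded at 0.5:

import numpy as np

def demo_overlay(image, pred_mask, color=(255, 0, 0)):
    # Copy the image and paint `color` wherever the mask exceeds 0.5,
    # mirroring the overlay_image logic added to process_image.
    overlay = image.copy()
    mask_indices = pred_mask > 0.5
    if mask_indices.any():
        overlay[mask_indices] = color
    return overlay

# Synthetic 4x4 RGB image with a mask covering the top-left 2x2 block.
img = np.zeros((4, 4, 3), dtype=np.uint8)
mask = np.zeros((4, 4), dtype=np.float32)
mask[:2, :2] = 1.0
out = demo_overlay(img, mask, color=(0, 255, 0))
assert (out[0, 0] == (0, 255, 0)).all() and (out[3, 3] == 0).all()

The committed loop applies the same copy-then-assign approach once per finding, cycling through the colors list by index.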