cvips committed
Commit: ccd6eeb
Parent(s): 6f76eb3
biomedparse+biomedllama_3b_multimodal
app.py
CHANGED
@@ -380,6 +380,14 @@ def process_image(image_path, user_prompt, modality=None):
     analysis_results = []
     colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]
 
+    # Add color mapping to analysis with more natural language
+    color_descriptions = []
+    for idx, prompt in enumerate(relevant_prompts):
+        color = colors[idx % len(colors)]
+        color_name = {(255,0,0): "red", (0,255,0): "green", (0,0,255): "blue",
+                      (255,255,0): "yellow", (255,0,255): "magenta"}[color]
+        color_descriptions.append(f"The {prompt} is highlighted in {color_name} color")
+
     for idx, prompt in enumerate(relevant_prompts):
         try:
             mask_list = interactive_infer_image(model, pil_image, [prompt])
@@ -407,13 +415,14 @@ def process_image(image_path, user_prompt, modality=None):
         results = [image]  # Return original image if no overlays were created
 
     detailed_analysis = ""
-    # try:
-    analysis_prompt = f"Give the modality, organ, analysis, abnormalities (if any), treatment (if abnormalities are present) for this image. Focus more on the user question. which is: {user_prompt}"
-    msgs = [{'role': 'user', 'content': [pil_image, analysis_prompt]}]
-
-    # llm_response = ""
     if llm_model and llm_tokenizer:
         try:
+            # Add color legend with more natural language
+            detailed_analysis += "\n\n As shown in the images outputs details:\n \n" + "\n".join(color_descriptions)
+
+            analysis_prompt = f"Give the modality, organ, analysis, abnormalities (if any), treatment (if abnormalities are present) for this image. Focus more on the user question. which is: {user_prompt}"
+            msgs = [{'role': 'user', 'content': [pil_image, analysis_prompt]}]
+
             for new_text in llm_model.chat(
                 image=pil_image,
                 msgs=msgs,
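The change above builds a human-readable color legend alongside the segmentation overlays and prepends it to the LLM analysis text. A minimal standalone sketch of that legend logic follows; the helper name build_color_descriptions and the example prompts are hypothetical (not part of app.py), but the palette, the modulo color cycling, and the appended legend string mirror the committed code.

# Standalone sketch of the color-legend logic introduced by this commit.
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]
color_names = {
    (255, 0, 0): "red",
    (0, 255, 0): "green",
    (0, 0, 255): "blue",
    (255, 255, 0): "yellow",
    (255, 0, 255): "magenta",
}

def build_color_descriptions(relevant_prompts):
    # Hypothetical helper: one legend line per prompt, cycling through the palette
    # when there are more prompts than colors (idx % len(colors)).
    descriptions = []
    for idx, prompt in enumerate(relevant_prompts):
        color = colors[idx % len(colors)]
        descriptions.append(f"The {prompt} is highlighted in {color_names[color]} color")
    return descriptions

if __name__ == "__main__":
    legend = build_color_descriptions(["liver", "tumor"])  # example prompts for illustration
    detailed_analysis = ""
    # Mirrors the commit: the legend is appended to the analysis text before the
    # streamed LLM response is accumulated.
    detailed_analysis += "\n\n As shown in the images outputs details:\n \n" + "\n".join(legend)
    print(detailed_analysis)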