major change, used biomed_llama 7b
- app.py +49 -45
- requirements.txt +10 -0
app.py
CHANGED
@@ -190,28 +190,24 @@ def initialize_model():
     return model
 
 def initialize_llm():
-    [original lines 193-210 not captured in this view]
-        trust_remote_code=True
-    )
-
-    return model, tokenizer
+    try:
+        model = AutoModel.from_pretrained(
+            "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
+            device_map="auto",
+            torch_dtype=torch.float16,
+            trust_remote_code=True,
+            low_cpu_mem_usage=True
+        )
+
+        tokenizer = AutoTokenizer.from_pretrained(
+            "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
+            trust_remote_code=True
+        )
+
+        return model, tokenizer
+    except Exception as e:
+        print(f"Failed to initialize LLM: {str(e)}")
+        return None, None
 
 model = initialize_model()
 llm_model, llm_tokenizer = initialize_llm()
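For reference, the loading path added in this hunk can be exercised on its own before wiring it into the Space. The sketch below is not part of the commit; it assumes transformers and torch are installed, and it reuses the model id and keyword arguments shown in the hunk above.

# Standalone smoke test (a sketch, not part of the commit): load the same
# checkpoint that the new initialize_llm() uses and report success or failure.
# Model id and arguments are copied from the hunk above.
import torch
from transformers import AutoModel, AutoTokenizer

MODEL_ID = "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"

def load_llm(model_id: str = MODEL_ID):
    """Return (model, tokenizer), or (None, None) if loading fails."""
    try:
        model = AutoModel.from_pretrained(
            model_id,
            device_map="auto",          # place layers on available devices
            torch_dtype=torch.float16,  # half precision to reduce memory
            trust_remote_code=True,     # checkpoint ships custom modeling code
            low_cpu_mem_usage=True,
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        return model, tokenizer
    except Exception as exc:
        print(f"Failed to initialize LLM: {exc}")
        return None, None

if __name__ == "__main__":
    model, tokenizer = load_llm()
    print("loaded" if model is not None else "load failed; app falls back to BiomedParse only")

Like the committed initialize_llm(), the sketch returns (None, None) on failure rather than raising, which is what lets the rest of the app degrade gracefully when the checkpoint cannot be downloaded or fit into memory.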
@@ -254,29 +250,37 @@ def process_image(image_path, text_prompts, modality):
         overlay_image[pred_masks[i] > 0.5] = [255, 0, 0]
         results.append(overlay_image)
 
-    # Process with LLM
-    [original lines 258-279 not captured in this view]
+    # Process with LLM only if available
+    if llm_model is not None and llm_tokenizer is not None:
+        try:
+            pil_image = Image.fromarray(image)
+            question = 'Give the modality, organ, analysis, abnormalities (if any), treatment (if abnormalities are present)?'
+            msgs = [{'role': 'user', 'content': [pil_image, question]}]
+
+            llm_response = ""
+            for new_text in llm_model.chat(
+                image=pil_image,
+                msgs=msgs,
+                tokenizer=llm_tokenizer,
+                sampling=True,
+                temperature=0.95,
+                stream=True
+            ):
+                llm_response += new_text
+
+            # Combine both analyses
+            combined_analysis = "\n\n".join([
+                "BiomedParse Analysis:",
+                "\n".join(analysis_results),
+                "\nLLM Analysis:",
+                llm_response
+            ])
+        except Exception as e:
+            print(f"LLM analysis failed: {str(e)}")
+            combined_analysis = "\n".join(analysis_results)
+    else:
+        # If LLM is not available, only show BiomedParse results
+        combined_analysis = "\n".join(analysis_results)
 
     return results, combined_analysis
 
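The streaming call in this hunk can also be factored into a small helper so the fallback behaviour is testable apart from process_image(). The sketch below is not part of the commit; the helper name describe_with_llm is hypothetical, and the chat() signature (an image, a msgs list of role/content dicts, the tokenizer, stream=True yielding text chunks) is the custom interface shipped with the checkpoint, exactly as used in the hunk above.

# Sketch only: the LLM step from process_image() pulled into a helper.
# describe_with_llm is a hypothetical name; chat() is the checkpoint's
# custom method shown in the hunk above, not a stock transformers API.
from typing import Optional
from PIL import Image

DEFAULT_QUESTION = (
    "Give the modality, organ, analysis, abnormalities (if any), "
    "treatment (if abnormalities are present)?"
)

def describe_with_llm(image_array, llm_model, llm_tokenizer,
                      question: str = DEFAULT_QUESTION) -> Optional[str]:
    """Return the LLM's streamed answer, or None if the LLM is unavailable or fails."""
    if llm_model is None or llm_tokenizer is None:
        return None  # initialize_llm() fell back to (None, None)
    try:
        pil_image = Image.fromarray(image_array)
        msgs = [{"role": "user", "content": [pil_image, question]}]
        chunks = llm_model.chat(
            image=pil_image,
            msgs=msgs,
            tokenizer=llm_tokenizer,
            sampling=True,
            temperature=0.95,
            stream=True,          # yields text incrementally
        )
        return "".join(chunks)    # accumulate the stream into one string
    except Exception as exc:
        print(f"LLM analysis failed: {exc}")
        return None

process_image() would then append the returned text to the BiomedParse analysis when it is not None, and keep the BiomedParse-only output otherwise, mirroring the if/else branch in the committed code.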
requirements.txt
CHANGED
@@ -52,3 +52,13 @@ pydot==3.0.1
 tabulate==0.9.0
 termcolor==2.4.0
 tokenizers==0.14.1
+#
+# torch>=2.0.0
+# transformers>=4.34.0
+# gradio>=4.40.0
+# Pillow>=9.0.0
+# numpy>=1.21.0
+tqdm>=4.65.0
+# huggingface-hub>=0.19.0
+safetensors>=0.4.0
+# accelerate>=0.25.0