scdrand23 committed
Commit: d28e001
Parent: 0c20836

major change, used biomed_llama 7b
Files changed (2):
  1. app.py +49 -45
  2. requirements.txt +10 -0
app.py CHANGED
@@ -190,28 +190,24 @@ def initialize_model():
     return model
 
 def initialize_llm():
-    bnb_config = BitsAndBytesConfig(
-        load_in_4bit=True,
-        bnb_4bit_quant_type="nf4",
-        bnb_4bit_use_double_quant=True,
-        bnb_4bit_compute_dtype=torch.float16
-    )
-
-    model = AutoModel.from_pretrained(
-        "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
-        quantization_config=bnb_config,
-        device_map="auto",
-        torch_dtype=torch.float16,
-        trust_remote_code=True,
-        attn_implementation="flash_attention_2"
-    )
-
-    tokenizer = AutoTokenizer.from_pretrained(
-        "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
-        trust_remote_code=True
-    )
-
-    return model, tokenizer
+    try:
+        model = AutoModel.from_pretrained(
+            "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
+            device_map="auto",
+            torch_dtype=torch.float16,
+            trust_remote_code=True,
+            low_cpu_mem_usage=True
+        )
+
+        tokenizer = AutoTokenizer.from_pretrained(
+            "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
+            trust_remote_code=True
+        )
+
+        return model, tokenizer
+    except Exception as e:
+        print(f"Failed to initialize LLM: {str(e)}")
+        return None, None
 
 model = initialize_model()
 llm_model, llm_tokenizer = initialize_llm()
@@ -254,29 +250,37 @@ def process_image(image_path, text_prompts, modality):
         overlay_image[pred_masks[i] > 0.5] = [255, 0, 0]
         results.append(overlay_image)
 
-    # Process with LLM
-    pil_image = Image.fromarray(image)
-    question = 'Give the modality, organ, analysis, abnormalities (if any), treatment (if abnormalities are present)?'
-    msgs = [{'role': 'user', 'content': [pil_image, question]}]
-
-    llm_response = ""
-    for new_text in llm_model.chat(
-        image=pil_image,
-        msgs=msgs,
-        tokenizer=llm_tokenizer,
-        sampling=True,
-        temperature=0.95,
-        stream=True
-    ):
-        llm_response += new_text
-
-    # Combine both analyses
-    combined_analysis = "\n\n".join([
-        "BiomedParse Analysis:",
-        "\n".join(analysis_results),
-        "\nLLM Analysis:",
-        llm_response
-    ])
+    # Process with LLM only if available
+    if llm_model is not None and llm_tokenizer is not None:
+        try:
+            pil_image = Image.fromarray(image)
+            question = 'Give the modality, organ, analysis, abnormalities (if any), treatment (if abnormalities are present)?'
+            msgs = [{'role': 'user', 'content': [pil_image, question]}]
+
+            llm_response = ""
+            for new_text in llm_model.chat(
+                image=pil_image,
+                msgs=msgs,
+                tokenizer=llm_tokenizer,
+                sampling=True,
+                temperature=0.95,
+                stream=True
+            ):
+                llm_response += new_text
+
+            # Combine both analyses
+            combined_analysis = "\n\n".join([
+                "BiomedParse Analysis:",
+                "\n".join(analysis_results),
+                "\nLLM Analysis:",
+                llm_response
+            ])
+        except Exception as e:
+            print(f"LLM analysis failed: {str(e)}")
+            combined_analysis = "\n".join(analysis_results)
+    else:
+        # If LLM is not available, only show BiomedParse results
+        combined_analysis = "\n".join(analysis_results)
 
     return results, combined_analysis
 
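For reference, a minimal sketch (not part of this commit) of how the graceful-degradation path above behaves end to end: initialize_llm() now returns (None, None) instead of raising, and process_image() then falls back to the BiomedParse-only report. The import of app and the image path, prompt, and modality below are illustrative placeholders only.

    # Illustrative sketch only: exercising the fallback path added above.
    # `app` refers to this Space's app.py; all inputs are placeholder values.
    import app

    if app.llm_model is None or app.llm_tokenizer is None:
        # initialize_llm() caught the loading error and returned (None, None),
        # so process_image() will return the BiomedParse-only report.
        print("Bio-Medical LLM unavailable; segmentation analysis only.")

    overlays, report = app.process_image(
        "examples/ct_lung.png",   # placeholder image path
        ["lung tumor"],           # placeholder text prompt(s)
        "CT",                     # placeholder modality
    )
    print(report)
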
requirements.txt CHANGED
@@ -52,3 +52,13 @@ pydot==3.0.1
 tabulate==0.9.0
 termcolor==2.4.0
 tokenizers==0.14.1
+#
+# torch>=2.0.0
+# transformers>=4.34.0
+# gradio>=4.40.0
+# Pillow>=9.0.0
+# numpy>=1.21.0
+tqdm>=4.65.0
+# huggingface-hub>=0.19.0
+safetensors>=0.4.0
+# accelerate>=0.25.0