scdrand23 committed
Commit ca50e59 · Parent: d28e001

major change, used biomed_llama 8B

Files changed (1): app.py (+17, -10)
app.py CHANGED
@@ -191,6 +191,7 @@ def initialize_model():
 
 def initialize_llm():
     try:
+        print("Starting LLM initialization...")
         model = AutoModel.from_pretrained(
             "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
             device_map="auto",
@@ -198,12 +199,13 @@ def initialize_llm():
             trust_remote_code=True,
             low_cpu_mem_usage=True
         )
+        print("Model loaded successfully")
 
         tokenizer = AutoTokenizer.from_pretrained(
             "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
             trust_remote_code=True
         )
-
+        print("Tokenizer loaded successfully")
         return model, tokenizer
     except Exception as e:
         print(f"Failed to initialize LLM: {str(e)}")
@@ -252,11 +254,13 @@ def process_image(image_path, text_prompts, modality):
 
     # Process with LLM only if available
     if llm_model is not None and llm_tokenizer is not None:
+        print("LLM model and tokenizer are available")
         try:
             pil_image = Image.fromarray(image)
             question = 'Give the modality, organ, analysis, abnormalities (if any), treatment (if abnormalities are present)?'
             msgs = [{'role': 'user', 'content': [pil_image, question]}]
 
+            print("Starting LLM inference...")
             llm_response = ""
             for new_text in llm_model.chat(
                 image=pil_image,
@@ -267,19 +271,22 @@ def process_image(image_path, text_prompts, modality):
                 stream=True
             ):
                 llm_response += new_text
+            print(f"LLM generated response: {llm_response}")
 
-            # Combine both analyses
-            combined_analysis = "\n\n".join([
-                "BiomedParse Analysis:",
-                "\n".join(analysis_results),
-                "\nLLM Analysis:",
-                llm_response
-            ])
+            # Make the combined analysis more visible
+            combined_analysis = "\n\n" + "="*50 + "\n"
+            combined_analysis += "BiomedParse Analysis:\n"
+            combined_analysis += "\n".join(analysis_results)
+            combined_analysis += "\n\n" + "="*50 + "\n"
+            combined_analysis += "LLM Analysis:\n"
+            combined_analysis += llm_response
+            combined_analysis += "\n" + "="*50
+
         except Exception as e:
-            print(f"LLM analysis failed: {str(e)}")
+            print(f"LLM analysis failed with error: {str(e)}")
             combined_analysis = "\n".join(analysis_results)
     else:
-        # If LLM is not available, only show BiomedParse results
+        print("LLM model or tokenizer is not available")
         combined_analysis = "\n".join(analysis_results)
 
     return results, combined_analysis
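
The except branch of initialize_llm is truncated in the hunk above; the None checks in process_image imply it returns (None, None) on failure, so the app degrades to BiomedParse-only output. A minimal, self-contained sketch of that guarded-initialization pattern, with the (None, None) fallback as an assumption:

# Sketch: guarded LLM initialization. The (None, None) fallback is an
# assumption; the diff elides the except branch's return, but the None
# checks in process_image imply it.
from transformers import AutoModel, AutoTokenizer

MODEL_ID = "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"

def initialize_llm():
    try:
        print("Starting LLM initialization...")
        model = AutoModel.from_pretrained(
            MODEL_ID,
            device_map="auto",          # spread weights across available devices
            trust_remote_code=True,     # checkpoint ships custom modeling code
            low_cpu_mem_usage=True,
        )
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
        return model, tokenizer
    except Exception as e:
        print(f"Failed to initialize LLM: {str(e)}")
        return None, None               # assumed fallback; callers check for None

llm_model, llm_tokenizer = initialize_llm()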
 
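The arguments between image= and stream= in the chat call fall between hunks and are not shown. A hedged sketch of the streaming loop in isolation, assuming the elided kwargs pass the msgs list and the loaded tokenizer (both are constructed in the surrounding code); describe_image is a hypothetical helper, not part of app.py:

# Sketch of the streaming chat call on its own. msgs= and tokenizer= are
# assumptions: the diff elides the arguments between image= and stream=.
from PIL import Image

def describe_image(llm_model, llm_tokenizer, image_path):
    pil_image = Image.open(image_path).convert("RGB")
    question = ('Give the modality, organ, analysis, abnormalities (if any), '
                'treatment (if abnormalities are present)?')
    msgs = [{'role': 'user', 'content': [pil_image, question]}]

    llm_response = ""
    for new_text in llm_model.chat(
        image=pil_image,
        msgs=msgs,                # assumed: the elided kwargs include msgs
        tokenizer=llm_tokenizer,  # assumed: and the tokenizer loaded earlier
        stream=True,              # yields text chunks, as the diff's loop shows
    ):
        llm_response += new_text  # accumulate streamed chunks into one string
    return llm_response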