vignesha7 commited on
Commit
b5ad8d3
·
verified ·
1 Parent(s): c5709c7

Create app.py with inference

Browse files

BrainMRI Radiology Expert: a Qwen2-VL-2B-Instruct model fine-tuned on brain MRI images from spr-serena/mri_scans_labelled.
It can receive a 2D brain-scan slice or image and provide a brief description of the image.

Files changed (1) hide show
  1. app.py +61 -0
app.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Inference app using Gradio
import gradio as gr
from peft import PeftModel
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
# Load the frozen base VLM, then attach the fine-tuned PEFT adapter on top of it.
base_model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
model = PeftModel.from_pretrained(base_model, "vignesha7/qwen2-2b-instruct-Brain-MRI-Description")

# Processor bundles the chat template, tokenizer and image preprocessing for Qwen2-VL.
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
def generate_description(sample):
    """Generate a brief textual description of a 2D brain-MRI image.

    Parameters
    ----------
    sample : PIL.Image.Image
        The scan image handed over by the Gradio UI.

    Returns
    -------
    str
        The model's description of the image.
    """
    system_message = "You are an expert MRI radiographer. you can describe what you see in the mri image"
    prompt = "Describe accurately what you see in this radiology image."

    # Chat-style conversation in the structure Qwen2-VL's processor expects.
    conversation = [
        {
            "role": "system",
            "content": [{"type": "text", "text": system_message}],
        },
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image", "image": sample},
            ],
        },
    ]

    # Render the conversation into the model's prompt format (untokenized),
    # leaving the generation cue appended at the end.
    chat_text = processor.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )

    # Extract the visual inputs referenced by the conversation.
    images, videos = process_vision_info(conversation)
    model_inputs = processor(
        text=[chat_text],
        images=images,
        videos=videos,
        padding=True,
        return_tensors="pt",
    ).to(model.device)

    # Sample up to 256 new tokens, then drop the prompt tokens from each sequence
    # so only the newly generated continuation is decoded.
    output_ids = model.generate(
        **model_inputs, max_new_tokens=256, top_p=1.0, do_sample=True, temperature=0.8
    )
    continuations = [
        full_ids[len(prompt_ids) :]
        for prompt_ids, full_ids in zip(model_inputs.input_ids, output_ids)
    ]
    decoded = processor.batch_decode(
        continuations, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    return decoded[0]
### Gradio app ###
title = "BrainMRI Radiology Expert"
# User-facing blurb rendered under the title in the UI.
# (Fixed grammar and the missing space after the period in the original string.)
description = "A Qwen2-VL-2B-Instruct model fine-tuned on brain MRI images. Describes the brain image."

# Simple image-in / text-out interface wired to the inference function.
demo = gr.Interface(
    fn=generate_description,
    inputs=gr.Image(type="pil"),  # PIL image matches what generate_description expects
    outputs="text",
    title=title,
    description=description,
)

# Guard the launch so importing this module (e.g. for tests) does not start a server.
if __name__ == "__main__":
    demo.launch()