Update app.py
app.py
CHANGED
@@ -104,16 +104,18 @@ def generate_report(frontal_path, lateral_path, indication, technique, compariso
         return_tensors="pt",
         get_grounding=grounding
     ).to("cpu")
-
-
-
+
+
+    if "image_sizes" in processed:
+        processed.pop("image_sizes")
+
     outputs = MODEL_STATE["model"].generate(
-        **
+        **processed,
         max_new_tokens=450 if grounding else 300,
         use_cache=True
     )

-    prompt_length =
+    prompt_length = processed["input_ids"].shape[-1]
     decoded = MODEL_STATE["processor"].decode(outputs[0][prompt_length:], skip_special_tokens=True)
     return MODEL_STATE["processor"].convert_output_to_plaintext_or_grounded_sequence(decoded.lstrip())

@@ -135,16 +137,18 @@ def ground_phrase(frontal_path, phrase):
         phrase=phrase,
         return_tensors="pt"
     ).to("cpu")
-
-
+
+
+    if "image_sizes" in processed:
+        processed.pop("image_sizes")

     outputs = MODEL_STATE["model"].generate(
-        **
+        **processed,
         max_new_tokens=150,
         use_cache=True
     )

-    prompt_length =
+    prompt_length = processed["input_ids"].shape[-1]
     decoded = MODEL_STATE["processor"].decode(outputs[0][prompt_length:], skip_special_tokens=True)
     return MODEL_STATE["processor"].convert_output_to_plaintext_or_grounded_sequence(decoded)

@@ -229,4 +233,4 @@ with gr.Blocks(title="MAIRA-2 Medical Assistant") as demo:
         outputs=pg_output
     )

-demo.launch()
+demo.launch()
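For context, a minimal sketch of how the patched report-generation path fits together after this change. It assumes the MAIRA-2 (microsoft/maira-2) processor API described in its model card (format_and_preprocess_reporting_input and its keyword names), and the function name generate_report_sketch, the PIL image loading, and the None handling for the lateral view are illustrative assumptions, not code shown in this diff. The idea of the patch: the processor may return an "image_sizes" entry that generate() does not expect as a keyword argument, so it is dropped before the inputs are splatted in, and prompt_length is used to decode only the newly generated tokens.

    from PIL import Image

    def generate_report_sketch(frontal_path, lateral_path, indication, technique, comparison, grounding=False):
        # Build model inputs with the MAIRA-2 processor (keyword names follow the
        # model card; adjust if the app's actual call differs).
        processed = MODEL_STATE["processor"].format_and_preprocess_reporting_input(
            current_frontal=Image.open(frontal_path),
            current_lateral=Image.open(lateral_path) if lateral_path else None,
            prior_frontal=None,
            indication=indication,
            technique=technique,
            comparison=comparison,
            prior_report=None,
            return_tensors="pt",
            get_grounding=grounding,
        ).to("cpu")

        # The processor output may include "image_sizes", which generate() does not
        # expect as a keyword argument, so it is removed before splatting.
        if "image_sizes" in processed:
            processed.pop("image_sizes")

        outputs = MODEL_STATE["model"].generate(
            **processed,
            max_new_tokens=450 if grounding else 300,
            use_cache=True,
        )

        # Decode only the tokens generated after the prompt.
        prompt_length = processed["input_ids"].shape[-1]
        decoded = MODEL_STATE["processor"].decode(outputs[0][prompt_length:], skip_special_tokens=True)
        return MODEL_STATE["processor"].convert_output_to_plaintext_or_grounded_sequence(decoded.lstrip())

The phrase-grounding path in the second hunk follows the same pattern, presumably built from the processor's phrase-grounding input helper, with a smaller max_new_tokens budget (150) since only a grounded phrase is generated.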