Update app.py
app.py CHANGED
@@ -161,6 +161,7 @@ def generate_image(prompt,
 
     return [Image.fromarray(images[i]).resize((768, 768), Image.LANCZOS) for i in range(parallel_size)]
 
+
 # Gradio interface with improved UI
 with gr.Blocks(theme=gr.themes.Soft(
     primary_hue="blue",
@@ -180,7 +181,7 @@ with gr.Blocks(theme=gr.themes.Soft(
 
     with gr.Tabs():
         # Visual Chat Tab
-        with gr.Tab("Visual Understanding"
+        with gr.Tab("Visual Understanding"):  # Removed icon parameter
             with gr.Row(equal_height=True):
                 with gr.Column(scale=1):
                     image_input = gr.Image(
@@ -231,29 +232,19 @@ with gr.Blocks(theme=gr.themes.Soft(
                         show_copy_button=True
                     )
 
-                gr.Examples
-
-
-
-
-
-
-Detailed Image Analysis & Percentage Breakdown: Analyze the image, addressing each area listed in Step 1. For each area:
-Provide a highly detailed, objective description, using precise ophthalmological terminology. Quantify observations whenever possible (e.g., cup-to-disc ratio, A/V ratio).
-State the percentage of that area you were able to confidently analyze, based on image quality and clarity. For example: "Optic Disc: 90% analyzable (10% obscured by slight blurring at the superior margin)." "Macula: 100% analyzable." "Peripheral Retina (Nasal): 60% analyzable (40% not visible in the image)." Be precise.
-For any areas where analysis is incomplete (<100%), briefly explain the limiting factor (e.g., poor focus, limited field of view, artifact).
-Differential Diagnosis (Image-Based Only): Based solely on your Step 2 analysis, provide:
-Most Likely Diagnosis (from image findings).
-Other Possible Diagnoses (from image findings).
-Rationale: For each diagnosis, briefly link specific image findings to the diagnostic criteria.""",
-"fundus.webp",
+                with gr.Accordion("Medical Analysis Examples", open=False):
+                    gr.Examples(
+                        examples=[
+                            [
+                                """You are an AI assistant trained to analyze medical images...""",
+                                "fundus.webp",
+                            ],
                         ],
-
-
-                )
+                        inputs=[question_input, image_input],
+                    )
 
         # Image Generation Tab
-        with gr.Tab("Image Generation"
+        with gr.Tab("Image Generation"):  # Removed icon parameter
             with gr.Column():
                 prompt_input = gr.Textbox(
                     label="Image Description",
@@ -299,18 +290,17 @@ Rationale: For each diagnosis, briefly link specific image findings to the diagn
                         object_fit="contain"
                     )
 
-                gr.Examples
-
-
-
-
-
-
-
-
-
-
-                )
+                with gr.Accordion("Generation Examples", open=False):
+                    gr.Examples(
+                        examples=[
+                            "Master shifu racoon wearing drip attire as a street gangster.",
+                            "The face of a beautiful girl",
+                            "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+                            "A glass of red wine on a reflective surface.",
+                            "A cute and adorable baby fox with big brown eyes...",
+                        ],
+                        inputs=prompt_input,
+                    )
 
     # Connect components
     understanding_button.click(
@@ -338,10 +328,6 @@ demo.load(css="""
         border-radius: 8px;
         border: 2px solid #eee;
     }
-    .tabs.tab-nav {
-        border-bottom: 2px solid #eee;
-        margin-bottom: 2rem;
-    }
     .tab-nav {
         background: white;
         padding: 1rem;
@@ -379,4 +365,4 @@ demo.load(css="""
 # Launch the demo
 if __name__ == "__main__":
     demo.launch(share=True)
-
+
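The main functional change in this commit is that each tab's gr.Examples block is now nested inside a collapsed gr.Accordion, and the gr.Tab calls drop the icon argument. The snippet below is a minimal, self-contained sketch of that layout pattern, not the Space's full app.py; component names, labels, and example strings here are illustrative only.

# Minimal sketch of the collapsible-examples pattern used above.
# Names and example values are placeholders, not the Space's real ones.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.Tab("Visual Understanding"):  # plain label, no icon argument
            image_input = gr.Image(label="Image")
            question_input = gr.Textbox(label="Question")
            with gr.Accordion("Medical Analysis Examples", open=False):
                gr.Examples(
                    # one row per example; one value per input component
                    examples=[["Describe the key findings in this image.", None]],
                    inputs=[question_input, image_input],
                )
        with gr.Tab("Image Generation"):  # plain label, no icon argument
            prompt_input = gr.Textbox(label="Image Description")
            with gr.Accordion("Generation Examples", open=False):
                gr.Examples(
                    # a flat list is accepted when there is a single input
                    examples=["A glass of red wine on a reflective surface."],
                    inputs=prompt_input,
                )

if __name__ == "__main__":
    demo.launch()

With open=False the examples stay hidden until the accordion is expanded, which matches the layout the diff introduces for both tabs.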