Update app.py
app.py
CHANGED
@@ -422,7 +422,7 @@ with gr.Blocks() as demo:
 
         img_input = gr.ImageEditor()
         model_select = gr.Dropdown(
-            ["GLEE-Lite (R50)", "GLEE-Plus (SwinL)"], value = "GLEE-
+            ["GLEE-Lite (R50)", "GLEE-Plus (SwinL)"], value = "GLEE-Plus (SwinL)" , multiselect=False, label="Model",
         )
         with gr.Row():
             with gr.Column():
@@ -444,7 +444,14 @@ with gr.Blocks() as demo:
                 )
                 # with gr.Column():
                 with gr.Group():
-
+                    with gr.Accordion("Text based detection usage",open=False):
+                        gr.Markdown(
+                            'Press the "Detect & Segment" button directly to try the effect using the COCO category.<br />\
+                            GLEE supports three kind of object perception methods: category list, textual description, and class-agnostic.<br />\
+                            1.Select an existing category list from the "Categories" dropdown, like COCO or OBJ365, or customize your own list.<br />\
+                            2.Enter arbitrary object name in "Custom Category", or choose the expression model and describe the object in "Expression Textbox" for single object detection only.<br />\
+                            3.For class-agnostic mode, choose "Class-Agnostic" from the "Categories" dropdown.'
+                        )
                     with gr.Accordion("Interactive segmentation usage",open=False):
                         gr.Markdown(
                             'For interactive segmentation:<br />\
@@ -452,13 +459,7 @@ with gr.Blocks() as demo:
                             2.Point mode accepts a single point only; multiple points default to the centroid, so use boxes or scribbles for larger objects.<br />\
                             3.After drawing, click green "√" on the right side of the image to preview the prompt visualization; the segmentation mask follows the chosen prompt colors.'
                         )
-
-                        gr.Markdown(
-                            'GLEE supports three kind of object perception methods: category list, textual description, and class-agnostic.<br />\
-                            1.Select an existing category list from the "Categories" dropdown, like COCO or OBJ365, or customize your own list.<br />\
-                            2.Enter arbitrary object name in "Custom Category", or choose the expression model and describe the object in "Expression Textbox" for single object detection only.<br />\
-                            3.For class-agnostic mode, choose "Class-Agnostic" from the "Categories" dropdown.'
-                        )
+
                     img_showbox = gr.Image(label="visual prompt area preview")
 
 
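For context, here is a minimal, self-contained sketch (not part of this commit) of how the touched pieces fit together after the change: the Dropdown now defaults to "GLEE-Plus (SwinL)" with multiselect=False and a "Model" label, and the new "Text based detection usage" Accordion sits above the existing "Interactive segmentation usage" one. It assumes Gradio 4.x (gr.ImageEditor); event handlers, the category/expression inputs, and the rest of the real app.py are omitted, and the Markdown bodies are shortened placeholders.

# Minimal sketch, assuming Gradio 4.x; component names follow the diff,
# everything else in the real app.py is omitted.
import gradio as gr

with gr.Blocks() as demo:
    img_input = gr.ImageEditor()
    # Updated dropdown: single-select, labeled, defaulting to the SwinL model.
    model_select = gr.Dropdown(
        ["GLEE-Lite (R50)", "GLEE-Plus (SwinL)"],
        value="GLEE-Plus (SwinL)",
        multiselect=False,
        label="Model",
    )
    with gr.Row():
        with gr.Column():
            with gr.Group():
                # Accordion added by this commit, placed above the existing one;
                # its text is a shortened placeholder here.
                with gr.Accordion("Text based detection usage", open=False):
                    gr.Markdown('Press the "Detect & Segment" button directly to try the effect using the COCO category.')
                with gr.Accordion("Interactive segmentation usage", open=False):
                    gr.Markdown("For interactive segmentation: draw a point, box, or scribble, then confirm to preview the prompt.")
                img_showbox = gr.Image(label="visual prompt area preview")

if __name__ == "__main__":
    demo.launch()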