import gradio as gr
from gradio_image_annotation import image_annotator
import numpy as np
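
# A prepopulated annotation: an image URL plus two labeled boxes in pixel
# coordinates (xmin/ymin/xmax/ymax), each with a display color.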
example_annotation = {
    "image": "https://gradio-builds.s3.amazonaws.com/demo-files/base.png",
    "boxes": [
        {
            "xmin": 636,
            "ymin": 575,
            "xmax": 801,
            "ymax": 697,
            "label": "Vehicle",
            "color": (255, 0, 0)
        },
        {
            "xmin": 360,
            "ymin": 615,
            "xmax": 386,
            "ymax": 702,
            "label": "Person",
            "color": (0, 255, 0)
        }
    ]
}
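
# Two single-box examples for the "Crop" tab; gr.Examples loads them into the
# crop annotator below.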
examples_crop = [
    {
        "image": "https://raw.githubusercontent.com/gradio-app/gradio/main/guides/assets/logo.png",
        "boxes": [
            {
                "xmin": 30,
                "ymin": 70,
                "xmax": 530,
                "ymax": 500,
                "color": (100, 200, 255),
            }
        ],
    },
    {
        "image": "https://gradio-builds.s3.amazonaws.com/demo-files/base.png",
        "boxes": [
            {
                "xmin": 636,
                "ymin": 575,
                "xmax": 801,
                "ymax": 697,
                "color": (255, 0, 0),
            },
        ],
    },
]
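
# Event handler for the "Crop" button: returns the image region inside the
# first box, or None if no box was drawn.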
def crop(annotations):
    # Account for any rotation applied in the annotator UI
    # ("orientation" is given in 90-degree steps).
    if angle := annotations.get("orientation", None):
        annotations["image"] = np.rot90(annotations["image"], k=-angle)
    if annotations["boxes"]:
        box = annotations["boxes"][0]
        # Slice the numpy image to the box: rows are y, columns are x.
        return annotations["image"][
            box["ymin"]:box["ymax"],
            box["xmin"]:box["xmax"]
        ]
    return None
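
# Event handler for "Get bounding boxes": the box list is shown in a gr.JSON output.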
def get_boxes_json(annotations):
    return annotations["boxes"]
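
# Build the demo UI: one tab for labeled object annotation, one for
# single-box cropping, plus a list of keyboard shortcuts.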
with gr.Blocks() as demo:
    with gr.Tab("Object annotation", id="tab_object_annotation"):
        annotator = image_annotator(
            example_annotation,
            label_list=["Person", "Vehicle"],
            label_colors=[(0, 255, 0), (255, 0, 0)],
        )
        button_get = gr.Button("Get bounding boxes")
        json_boxes = gr.JSON()
        button_get.click(get_boxes_json, annotator, json_boxes)

    with gr.Tab("Crop", id="tab_crop"):
        with gr.Row():
            annotator_crop = image_annotator(
                examples_crop[0],
                image_type="numpy",
                disable_edit_boxes=True,
                single_box=True,
            )
            image_crop = gr.Image()
        button_crop = gr.Button("Crop")
        button_crop.click(crop, annotator_crop, image_crop)
        gr.Examples(examples_crop, annotator_crop)

    with gr.Accordion("Keyboard Shortcuts"):
        gr.Markdown("""
- ``C``: Create mode
- ``D``: Drag mode
- ``E``: Edit selected box (same as double-click a box)
- ``Delete``: Remove selected box
- ``Space``: Reset view (zoom/pan)
- ``Enter``: Confirm modal dialog
- ``Escape``: Cancel/close modal dialog
""")

if __name__ == "__main__":
    demo.launch()