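"""Gradio demo for the Finegrain Object Eraser (Lite Version).

Erases an object from an image, selected either by a text prompt or by a drawn
bounding box, by calling the Finegrain Editor API.
"""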
import dataclasses as dc
import io
from functools import cache
from typing import Any
import gradio as gr
import pillow_heif
from environs import Env
from finegrain import BoundingBox, EditorAPIContext, EraseResultWithImage, ErrorResult
from gradio_image_annotation import image_annotator
from gradio_imageslider import ImageSlider
from PIL import Image
from typing_extensions import TypeIs
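# Let Pillow open HEIC/HEIF and AVIF uploads.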
pillow_heif.register_heif_opener()
pillow_heif.register_avif_opener()
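# Credentials and optional endpoint overrides come from the environment (ERASER_ prefix),
# e.g. via a .env file.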
env = Env()
env.read_env()
with env.prefixed("ERASER_"):
API_USER: str | None = env.str("API_USER")
API_PASSWORD: str | None = env.str("API_PASSWORD")
API_URL: str | None = env.str("API_URL", None)
CA_BUNDLE: str | None = env.str("CA_BUNDLE", None)
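# Build the Finegrain Editor API client once and reuse it across requests.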
@cache
def _ctx() -> EditorAPIContext:
assert API_USER is not None
assert API_PASSWORD is not None
ctx = EditorAPIContext(
user=API_USER,
password=API_PASSWORD,
priority="low",
user_agent="fg-hf-eraser",
)
if CA_BUNDLE:
ctx.verify = CA_BUNDLE
if API_URL:
ctx.base_url = API_URL
return ctx
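# Turn API error results into exceptions; the TypeIs return type lets
# `assert not is_error(result)` narrow `result` for the type checker.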
def is_error(result: Any) -> TypeIs[ErrorResult]:
if isinstance(result, ErrorResult):
raise RuntimeError(result.error)
return False
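# Scale the image so its shortest side is `shortest_side` px; images already within
# that bound are returned unchanged.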
def resize(image: Image.Image, shortest_side: int = 768) -> Image.Image:
if image.width <= shortest_side and image.height <= shortest_side:
return image
if image.width < image.height:
return image.resize(size=(shortest_side, int(shortest_side * image.height / image.width)))
return image.resize(size=(int(shortest_side * image.width / image.height), shortest_side))
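# Inputs for one erase job: the image plus either a text prompt or a bounding box.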
@dc.dataclass(kw_only=True)
class ProcessParams:
image: Image.Image
prompt: str | None = None
bbox: BoundingBox | None = None
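# Erase pipeline: upload the image, get a bounding box (inferred from the prompt when
# none is given), segment the object, erase it, and decode the returned image.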
async def _process(ctx: EditorAPIContext, params: ProcessParams) -> Image.Image:
with io.BytesIO() as f:
        # JPEG has no alpha channel, so drop any alpha before encoding the upload.
        params.image.convert("RGB").save(f, format="JPEG")
st_input = await ctx.call_async.upload_image(f)
if params.bbox:
segment_input_st, segment_bbox = st_input, params.bbox
else:
assert params.prompt
bbox_r = await ctx.call_async.infer_bbox(st_input, params.prompt)
assert not is_error(bbox_r)
segment_input_st, segment_bbox = bbox_r.state_id, None
mask_r = await ctx.call_async.segment(segment_input_st, bbox=segment_bbox)
assert not is_error(mask_r)
erased_r = await ctx.call_async.erase(st_input, mask_r.state_id, mode="express", with_image=True)
assert not is_error(erased_r)
assert isinstance(erased_r, EraseResultWithImage)
f = io.BytesIO()
f.write(erased_r.image)
f.seek(0)
return Image.open(f)
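# Handler for the bounding-box tab: rescale the drawn box to the resized image,
# run the erase pipeline, and return (input, output) for the before/after slider.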
def process_bbox(prompts: dict[str, Any]) -> tuple[Image.Image, Image.Image]:
assert isinstance(img := prompts["image"], Image.Image)
assert isinstance(boxes := prompts["boxes"], list)
assert len(boxes) == 1
assert isinstance(box := boxes[0], dict)
resized_img = resize(img)
bbox = [box[k] for k in ["xmin", "ymin", "xmax", "ymax"]]
if resized_img.width != img.width:
bbox = [int(v * resized_img.width / img.width) for v in bbox]
output_image = _ctx().run_one_sync(
_process,
ProcessParams(
image=resized_img,
bbox=(bbox[0], bbox[1], bbox[2], bbox[3]),
),
)
return (img, output_image)
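# Enable the Erase button only once a box has been drawn.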
def on_change_bbox(prompts: dict[str, Any] | None):
return gr.update(interactive=prompts is not None and len(prompts["boxes"]) > 0)
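# Handler for the prompt tab: erase the object named by the user.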
def process_prompt(img: Image.Image, prompt: str) -> tuple[Image.Image, Image.Image]:
resized_img = resize(img)
output_image = _ctx().run_one_sync(
_process,
ProcessParams(image=resized_img, prompt=prompt),
)
return (img, output_image)
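# Enable the Erase button only when both an image and a prompt are provided.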
def on_change_prompt(img: Image.Image | None, prompt: str | None):
return gr.update(interactive=bool(img and prompt))
TITLE = """
<h1>Finegrain Object Eraser (Lite Version)</h1>
<p>
Erase any object, along with its shadows and reflections, just by naming it!
</p>
<p>
🔌 For high-resolution results with scene preservation, superior shadow/reflection removal,
and enhanced missing pixel generation, <a href="https://finegrain.ai">try the Finegrain API</a>! 🔌
</p>
<p>
<a href="https://discord.gg/zFKg5TjXub" target="_blank">[Discord]</a>
<a href="https://github.com/finegrain-ai" target="_blank">[GitHub]</a>
<a href="https://finegrain.ai">[Finegrain API]</a>
</p>
"""
with gr.Blocks() as demo:
gr.HTML(TITLE)
with gr.Tab("By prompt", id="tab_prompt"):
with gr.Row():
with gr.Column():
iimg = gr.Image(type="pil", label="Input")
prompt = gr.Textbox(label="What should we erase?")
with gr.Column():
oimg = ImageSlider(label="Output")
with gr.Row():
btn = gr.ClearButton(components=[oimg], value="Erase Object", interactive=False)
for inp in [iimg, prompt]:
inp.change(
fn=on_change_prompt,
inputs=[iimg, prompt],
outputs=[btn],
)
btn.click(
fn=process_prompt,
inputs=[iimg, prompt],
outputs=[oimg],
api_name=False,
)
examples = [
[
"examples/white-towels-rattan-basket-white-table-with-bright-room-background.jpg",
"soap",
],
[
"examples/interior-decor-with-mirror-potted-plant.jpg",
"potted plant",
],
[
"examples/detail-ball-basketball-court-sunset.jpg",
"basketball",
],
[
"examples/still-life-device-table_23-2150994394.jpg",
"glass of water",
],
[
"examples/knife-fork-green-checkered-napkin_140725-63576.jpg",
"knife and fork",
],
[
"examples/city-night-with-architecture-vibrant-lights_23-2149836930.jpg",
"frontmost black car on right lane",
],
[
"examples/close-up-coffee-latte-wooden-table_23-2147893063.jpg",
"coffee cup on plate",
],
[
"examples/empty-chair-with-vase-plant_74190-2078.jpg",
"chair",
],
]
ex = gr.Examples(
examples=examples,
inputs=[iimg, prompt],
outputs=[oimg],
fn=process_prompt,
cache_examples=True,
)
with gr.Tab("By bounding box", id="tab_bb"):
with gr.Row():
with gr.Column():
annotator = image_annotator(
image_type="pil",
disable_edit_boxes=True,
show_download_button=False,
show_share_button=False,
single_box=True,
label="Input",
)
with gr.Column():
oimg = ImageSlider(label="Output")
with gr.Row():
btn = gr.ClearButton(components=[oimg], value="Erase Object", interactive=False)
annotator.change(
fn=on_change_bbox,
inputs=[annotator],
outputs=[btn],
)
btn.click(
fn=process_bbox,
inputs=[annotator],
outputs=[oimg],
api_name=False,
)
examples = [
{
"image": "examples/white-towels-rattan-basket-white-table-with-bright-room-background.jpg",
"boxes": [{"xmin": 836, "ymin": 475, "xmax": 1125, "ymax": 1013}],
},
{
"image": "examples/interior-decor-with-mirror-potted-plant.jpg",
"boxes": [{"xmin": 47, "ymin": 907, "xmax": 397, "ymax": 1633}],
},
{
"image": "examples/detail-ball-basketball-court-sunset.jpg",
"boxes": [{"xmin": 673, "ymin": 954, "xmax": 911, "ymax": 1186}],
},
{
"image": "examples/still-life-device-table_23-2150994394.jpg",
"boxes": [{"xmin": 429, "ymin": 586, "xmax": 571, "ymax": 834}],
},
{
"image": "examples/knife-fork-green-checkered-napkin_140725-63576.jpg",
"boxes": [{"xmin": 972, "ymin": 226, "xmax": 1092, "ymax": 1023}],
},
{
"image": "examples/city-night-with-architecture-vibrant-lights_23-2149836930.jpg",
"boxes": [{"xmin": 215, "ymin": 637, "xmax": 411, "ymax": 855}],
},
{
"image": "examples/close-up-coffee-latte-wooden-table_23-2147893063.jpg",
"boxes": [{"xmin": 255, "ymin": 456, "xmax": 1080, "ymax": 1064}],
},
{
"image": "examples/empty-chair-with-vase-plant_74190-2078.jpg",
"boxes": [{"xmin": 35, "ymin": 320, "xmax": 383, "ymax": 983}],
},
]
ex = gr.Examples(
examples=examples,
inputs=[annotator],
outputs=[oimg],
fn=process_bbox,
cache_examples=True,
)
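# Queue incoming requests (at most 30 waiting) and keep the programmatic API closed.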
demo.queue(max_size=30, api_open=False)
demo.launch(show_api=False)