Spaces:
Running
on
Zero
Running
on
Zero
xinjie.wang
committed on
Commit
·
69f4bd5
1
Parent(s):
1f34dd4
update
Browse files
app.py
CHANGED
@@ -35,12 +35,19 @@ with gr.Blocks(
|
|
35 |
with gr.Tab(
|
36 |
label="Image(auto seg)", id=0
|
37 |
) as single_image_input_tab:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
image_prompt = gr.Image(
|
39 |
label="Input Image",
|
40 |
format="png",
|
41 |
image_mode="RGBA",
|
42 |
type="pil",
|
43 |
-
|
44 |
)
|
45 |
gr.Markdown(
|
46 |
"""
|
@@ -52,6 +59,9 @@ with gr.Blocks(
|
|
52 |
) as samimage_input_tab:
|
53 |
with gr.Row():
|
54 |
with gr.Column(scale=1):
|
|
|
|
|
|
|
55 |
image_prompt_sam = gr.Image(
|
56 |
label="Input Image", type="numpy", height=400
|
57 |
)
|
@@ -183,15 +193,15 @@ with gr.Blocks(
|
|
183 |
examples = gr.Examples(
|
184 |
label="Image Gallery",
|
185 |
examples=[
|
186 |
-
[f"assets/example_image/{image}"]
|
187 |
for image in os.listdir(
|
188 |
-
"assets/example_image"
|
189 |
)
|
190 |
],
|
191 |
-
inputs=[
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
examples_per_page=10,
|
196 |
)
|
197 |
|
@@ -199,15 +209,15 @@ with gr.Blocks(
|
|
199 |
examples = gr.Examples(
|
200 |
label="Image Gallery",
|
201 |
examples=[
|
202 |
-
f"assets/example_image/{image}"
|
203 |
for image in os.listdir(
|
204 |
-
"assets/example_image"
|
205 |
)
|
206 |
],
|
207 |
-
inputs=[
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
examples_per_page=10,
|
212 |
)
|
213 |
with gr.Column(scale=1):
|
@@ -221,12 +231,22 @@ with gr.Blocks(
|
|
221 |
label="Gaussian Representation", height=300, interactive=False
|
222 |
)
|
223 |
aligned_gs = gr.Textbox(visible=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
224 |
with gr.Row():
|
225 |
model_output_mesh = gr.Model3D(
|
226 |
label="Mesh Representation",
|
227 |
height=300,
|
228 |
interactive=False,
|
229 |
clear_color=[1, 1, 1, 1],
|
|
|
230 |
)
|
231 |
gr.Markdown(
|
232 |
""" The rendering of `Gaussian Representation` takes additional 10s. """ # noqa
|
@@ -252,11 +272,11 @@ with gr.Blocks(
|
|
252 |
outputs=[is_samimage, single_sam_image_example, single_image_example],
|
253 |
)
|
254 |
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
image_prompt.change(
|
261 |
lambda: tuple(
|
262 |
[
|
@@ -296,20 +316,17 @@ with gr.Blocks(
|
|
296 |
],
|
297 |
)
|
298 |
image_prompt.change(
|
299 |
-
preprocess_image_fn,
|
300 |
-
inputs=image_prompt,
|
301 |
-
outputs=image_prompt,
|
302 |
-
).success(
|
303 |
active_btn_by_content,
|
304 |
inputs=image_prompt,
|
305 |
outputs=generate_btn,
|
306 |
)
|
307 |
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
|
|
|
313 |
image_prompt_sam.change(
|
314 |
lambda: tuple(
|
315 |
[
|
@@ -349,10 +366,6 @@ with gr.Blocks(
|
|
349 |
image_mask_sam,
|
350 |
selected_points,
|
351 |
],
|
352 |
-
).success(
|
353 |
-
preprocess_sam_image_fn,
|
354 |
-
inputs=[image_prompt_sam],
|
355 |
-
outputs=[image_prompt_sam],
|
356 |
)
|
357 |
|
358 |
image_prompt_sam.select(
|
|
|
35 |
with gr.Tab(
|
36 |
label="Image(auto seg)", id=0
|
37 |
) as single_image_input_tab:
|
38 |
+
image_prompt_raw = gr.Image(
|
39 |
+
label="Input Image(raw)",
|
40 |
+
format="png",
|
41 |
+
image_mode="RGBA",
|
42 |
+
type="pil",
|
43 |
+
height=300,
|
44 |
+
)
|
45 |
image_prompt = gr.Image(
|
46 |
label="Input Image",
|
47 |
format="png",
|
48 |
image_mode="RGBA",
|
49 |
type="pil",
|
50 |
+
visible=False,
|
51 |
)
|
52 |
gr.Markdown(
|
53 |
"""
|
|
|
59 |
) as samimage_input_tab:
|
60 |
with gr.Row():
|
61 |
with gr.Column(scale=1):
|
62 |
+
image_prompt_sam_raw = gr.Image(
|
63 |
+
label="Input Image(raw)", type="numpy", visible=False,
|
64 |
+
)
|
65 |
image_prompt_sam = gr.Image(
|
66 |
label="Input Image", type="numpy", height=400
|
67 |
)
|
|
|
193 |
examples = gr.Examples(
|
194 |
label="Image Gallery",
|
195 |
examples=[
|
196 |
+
[f"scripts/apps/assets/example_image/{image}"]
|
197 |
for image in os.listdir(
|
198 |
+
"scripts/apps/assets/example_image"
|
199 |
)
|
200 |
],
|
201 |
+
inputs=[image_prompt_raw],
|
202 |
+
fn=preprocess_image_fn,
|
203 |
+
outputs=[image_prompt],
|
204 |
+
run_on_click=True,
|
205 |
examples_per_page=10,
|
206 |
)
|
207 |
|
|
|
209 |
examples = gr.Examples(
|
210 |
label="Image Gallery",
|
211 |
examples=[
|
212 |
+
f"scripts/apps/assets/example_image/{image}"
|
213 |
for image in os.listdir(
|
214 |
+
"scripts/apps/assets/example_image"
|
215 |
)
|
216 |
],
|
217 |
+
inputs=[image_prompt_sam_raw],
|
218 |
+
fn=preprocess_sam_image_fn,
|
219 |
+
outputs=[image_prompt_sam],
|
220 |
+
run_on_click=True,
|
221 |
examples_per_page=10,
|
222 |
)
|
223 |
with gr.Column(scale=1):
|
|
|
231 |
label="Gaussian Representation", height=300, interactive=False
|
232 |
)
|
233 |
aligned_gs = gr.Textbox(visible=False)
|
234 |
+
|
235 |
+
lighting_css = """
|
236 |
+
<style>
|
237 |
+
#lighter_mesh canvas {
|
238 |
+
filter: brightness(2.8) !important;
|
239 |
+
}
|
240 |
+
</style>
|
241 |
+
"""
|
242 |
+
gr.HTML(lighting_css)
|
243 |
with gr.Row():
|
244 |
model_output_mesh = gr.Model3D(
|
245 |
label="Mesh Representation",
|
246 |
height=300,
|
247 |
interactive=False,
|
248 |
clear_color=[1, 1, 1, 1],
|
249 |
+
elem_id="lighter_mesh"
|
250 |
)
|
251 |
gr.Markdown(
|
252 |
""" The rendering of `Gaussian Representation` takes additional 10s. """ # noqa
|
|
|
272 |
outputs=[is_samimage, single_sam_image_example, single_image_example],
|
273 |
)
|
274 |
|
275 |
+
image_prompt_raw.upload(
|
276 |
+
preprocess_image_fn,
|
277 |
+
inputs=[image_prompt_raw],
|
278 |
+
outputs=[image_prompt],
|
279 |
+
)
|
280 |
image_prompt.change(
|
281 |
lambda: tuple(
|
282 |
[
|
|
|
316 |
],
|
317 |
)
|
318 |
image_prompt.change(
|
|
|
|
|
|
|
|
|
319 |
active_btn_by_content,
|
320 |
inputs=image_prompt,
|
321 |
outputs=generate_btn,
|
322 |
)
|
323 |
|
324 |
+
image_prompt_sam_raw.upload(
|
325 |
+
preprocess_sam_image_fn,
|
326 |
+
inputs=[image_prompt_sam_raw],
|
327 |
+
outputs=[image_prompt_sam],
|
328 |
+
)
|
329 |
+
|
330 |
image_prompt_sam.change(
|
331 |
lambda: tuple(
|
332 |
[
|
|
|
366 |
image_mask_sam,
|
367 |
selected_points,
|
368 |
],
|
|
|
|
|
|
|
|
|
369 |
)
|
370 |
|
371 |
image_prompt_sam.select(
|
common.py
CHANGED
@@ -208,17 +208,13 @@ def render_video(
|
|
208 |
@spaces.GPU
|
209 |
def preprocess_image_fn(
|
210 |
image: str | np.ndarray | Image.Image,
|
211 |
-
req: gr.Request,
|
212 |
) -> Image.Image:
|
213 |
-
if image is None:
|
214 |
-
return
|
215 |
-
|
216 |
if isinstance(image, str):
|
217 |
image = Image.open(image)
|
218 |
elif isinstance(image, np.ndarray):
|
219 |
image = Image.fromarray(image)
|
220 |
|
221 |
-
image.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
|
222 |
image = RBG_REMOVER(image)
|
223 |
image = trellis_preprocess(image)
|
224 |
|
@@ -227,12 +223,12 @@ def preprocess_image_fn(
|
|
227 |
|
228 |
@spaces.GPU
|
229 |
def preprocess_sam_image_fn(
|
230 |
-
image: Image.Image,
|
231 |
) -> Image.Image:
|
232 |
if isinstance(image, np.ndarray):
|
233 |
image = Image.fromarray(image)
|
234 |
|
235 |
-
image.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
|
236 |
sam_image = SAM_PREDICTOR.preprocess_image(image)
|
237 |
SAM_PREDICTOR.predictor.set_image(sam_image)
|
238 |
|
@@ -654,7 +650,7 @@ def text2image_fn(
|
|
654 |
if postprocess:
|
655 |
for idx in range(len(images)):
|
656 |
image = images[idx]
|
657 |
-
images[idx] = preprocess_image_fn(image
|
658 |
|
659 |
save_paths = []
|
660 |
for idx, image in enumerate(images):
|
|
|
208 |
@spaces.GPU
|
209 |
def preprocess_image_fn(
|
210 |
image: str | np.ndarray | Image.Image,
|
|
|
211 |
) -> Image.Image:
|
|
|
|
|
|
|
212 |
if isinstance(image, str):
|
213 |
image = Image.open(image)
|
214 |
elif isinstance(image, np.ndarray):
|
215 |
image = Image.fromarray(image)
|
216 |
|
217 |
+
# image.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
|
218 |
image = RBG_REMOVER(image)
|
219 |
image = trellis_preprocess(image)
|
220 |
|
|
|
223 |
|
224 |
@spaces.GPU
|
225 |
def preprocess_sam_image_fn(
|
226 |
+
image: Image.Image,
|
227 |
) -> Image.Image:
|
228 |
if isinstance(image, np.ndarray):
|
229 |
image = Image.fromarray(image)
|
230 |
|
231 |
+
# image.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
|
232 |
sam_image = SAM_PREDICTOR.preprocess_image(image)
|
233 |
SAM_PREDICTOR.predictor.set_image(sam_image)
|
234 |
|
|
|
650 |
if postprocess:
|
651 |
for idx in range(len(images)):
|
652 |
image = images[idx]
|
653 |
+
images[idx] = preprocess_image_fn(image)
|
654 |
|
655 |
save_paths = []
|
656 |
for idx, image in enumerate(images):
|