Update app.py
app.py CHANGED
@@ -11,20 +11,23 @@ import tempfile
 import io
 import base64
 import requests
+from collections import OrderedDict
+import uuid
+
 
 # Model initialization
 from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
-
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
 # oneFormer segmentation
 oneFormer_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
 oneFormer_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny").to(device)
-
 # classification = pipeline("image-classification", model="google/vit-base-patch16-224")
 # upscaling_client = InferenceClient(model="stabilityai/stable-diffusion-x4-upscaler")
 # inpainting_client = InferenceClient(model="stabilityai/stable-diffusion-inpainting")
+
+
 # Image-processing functions
+
 def segment_image(image):
     inputs = oneFormer_processor(image, task_inputs=["panoptic"], return_tensors="pt")
 
@@ -134,7 +137,7 @@ def remove_black_make_transparent(imageEditor):
     transparent_image = Image.fromarray(image_np)
     return transparent_image
 
-def rembg(imageEditor, request: gr.Request):
+def rembg(imageEditor, rembg_model, request: gr.Request):
     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
         imageEditor['composite'].save(temp_file.name)
         temp_file_path = temp_file.name
@@ -142,7 +145,7 @@ def rembg(imageEditor, request: gr.Request):
     result = client.predict(
         file=handle_file(temp_file_path),
         mask="Default",
-        model=
+        model=rembg_model,
         x=0,
         y=0,
         api_name="/inference"
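The dropdown choice now flows straight through to the remote endpoint. For reference, a minimal standalone sketch of the same gradio_client call; the Space id is a placeholder, since the actual `client` is constructed outside the lines shown in this hunk:

    from gradio_client import Client, handle_file

    client = Client("<rembg-space-id>")  # placeholder: app.py builds its client elsewhere
    result = client.predict(
        file=handle_file("segment.png"),
        mask="Default",
        model="birefnet-general-lite",  # any value from the new dropdown
        x=0,
        y=0,
        api_name="/inference",
    )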
@@ -181,6 +184,64 @@ def upscale_image(image_pil, version="v1.4", rescaling_factor=None):
     return upscaled_image
 
 
+# def inpainting(source_img, request: gr.Request):
+#     input_image = source_img["background"].convert("RGB")
+#     mask_image = source_img["layers"][0].convert("RGB")
+
+#     return inpainting_image(imageEditor['composite'])
+
+def inpainting_run(model_name, use_rasg, use_painta, prompt, imageMask,
+                   hr_image, seed, eta, negative_prompt, positive_prompt, ddim_steps,
+                   guidance_scale=7.5, batch_size=1, session_id=''
+                   ):
+    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
+        # hr_image.save(temp_file.name)
+        imageMask["background"].save(temp_file.name)
+        temp_file_path = temp_file.name  # image
+    print("background", temp_file_path)
+    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file2:
+        imageMask["layers"][0].save(temp_file2.name)
+        temp_file_path2 = temp_file2.name  # mask
+    print("mask", temp_file_path2)
+
+
+    client = Client("ameerazam08/FLUX.1-dev-Inpainting-Model-Beta-GPU")
+    result = client.predict(
+        input_image_editor={"background": handle_file(temp_file_path), "layers": [handle_file(temp_file_path2)], "composite": None},
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        controlnet_conditioning_scale=0.9,
+        guidance_scale=3.5,
+        seed=124,
+        num_inference_steps=24,
+        true_guidance_scale=3.5,
+        api_name="/process"
+    )
+    print(result)
+    return [(result, 'label')]
+
+
+    # client = Client("https://pair-hd-painter.hf.space/--replicas/0l7ng/")
+    # result = client.predict(
+    #     model_name,  # str (Option from: [('Dreamshaper Inpainting V8', 'Dreamshaper Inpainting V8'), ('Stable-Inpainting 2.0', 'Stable-Inpainting 2.0'), ('Stable-Inpainting 1.5', 'Stable-Inpainting 1.5')]) in 'Please select a model!' Dropdown component
+    #     use_rasg,  # bool in 'Use RASG' Checkbox component
+    #     use_painta,  # bool in 'Use PAIntA' Checkbox component
+    #     prompt,  # str in 'Inpainting Prompt' Textbox component
+    #     handle_file(temp_file_path2),  # str (filepath on your computer (or URL) of image) in 'Input Image' Image component  MASK
+    #     handle_file(temp_file_path),  # str (filepath on your computer (or URL) of image) in 'parameter_15' Image component  IMAGE
+    #     seed,  # int | float in 'Seed' Number component
+    #     eta,  # int | float (numeric value between 0 and 1) in 'eta' Slider component
+    #     negative_prompt,  # str in 'Negative prompt' Textbox component
+    #     positive_prompt,  # str in 'Positive prompt' Textbox component
+    #     ddim_steps,  # int | float (numeric value between 10 and 100) in 'Number of diffusion steps' Slider component
+    #     guidance_scale,  # int | float (numeric value between 0 and 30) in 'Guidance Scale' Slider component
+    #     batch_size,  # int | float in 'Batch size' Number component
+    #     session_id,  # str in 'parameter_44' Textbox component
+    #     api_name="/inpaint"
+    # )
+    # print(result)
+
+
 #3d models
 def hunyuan_client(request: gr.Request):
     try:
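inpainting_run uploads the editor's background and first brush layer as two separate PNGs. As a local sanity check, this sketch (not part of app.py) shows how the same gr.ImageEditor value dict could be reduced to a binary mask, assuming the fixed white brush configured further down:

    import numpy as np
    from PIL import Image

    def editor_value_to_mask(editor_value):
        # editor_value = {"background": PIL.Image, "layers": [PIL.Image, ...], "composite": PIL.Image}
        layer = editor_value["layers"][0].convert("RGBA")
        alpha = np.array(layer)[:, :, 3]
        # Painted pixels are opaque in the layer's alpha channel.
        return Image.fromarray(np.where(alpha > 0, 255, 0).astype(np.uint8))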
@@ -248,6 +309,16 @@ def generate_3d_model2(image_pil, request: gr.Request):
     print(result)
     return result[0]
 
+### some configs
+
+inpainting_models = OrderedDict([
+    ("Dreamshaper Inpainting V8", 'ds8_inp'),
+    ("Stable-Inpainting 2.0", 'sd2_inp'),
+    ("Stable-Inpainting 1.5", 'sd15_inp')
+])
+
+negative_prompt_str = "text, bad anatomy, bad proportions, blurry, cropped, deformed, disfigured, duplicate, error, extra limbs, gross proportions, jpeg artifacts, long neck, low quality, lowres, malformed, morbid, mutated, mutilated, out of frame, ugly, worst quality"
+positive_prompt_str = "Full HD, 4K, high quality, high resolution"
 
 ########## GRADIO ##########
 
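The OrderedDict keys are the human-readable names the model_picker dropdown displays; the short values look like HD-Painter's internal checkpoint ids, which the code does not yet consume anywhere. Translating a selection would be a plain lookup (illustration only, not in app.py):

    model_name = "Dreamshaper Inpainting V8"       # value coming from model_picker
    checkpoint_id = inpainting_models[model_name]  # -> 'ds8_inp'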
@@ -276,13 +347,114 @@ with gr.Blocks() as demo:
         with gr.Row(equal_height=True):
             upscale_slider = gr.Slider(minimum=1, maximum=5, value=2, step=0.1, label="scale factor")
             upscale_button = gr.Button("Upscale")
-            rembg_button = gr.Button("Rembg")
+        with gr.Row(equal_height=True):
+            rembg_model_selector = gr.Dropdown(
+                [
+                    "u2net",
+                    "u2netp",
+                    "u2net_human_seg",
+                    "u2net_cloth_seg",
+                    "silueta",
+                    "isnet-general-use",
+                    "isnet-anime",
+                    "birefnet-general",
+                    "birefnet-general-lite",
+                    "birefnet-portrait",
+                    "birefnet-dis",
+                    "birefnet-hrsod",
+                    "birefnet-cod",
+                    "birefnet-massive"
+                ],
+                value="birefnet-general-lite",
+                label="Rembg model"
+            )
+            rembg_button = gr.Button("Rembg")
         remove_background_button = gr.Button("Remove black background")
         with gr.Row(equal_height=True):
             add_transparent_border_slider = gr.Slider(minimum=10, maximum=500, value=200, step=10, label="in pixels")
             add_transparent_border_button = gr.Button("Add transparent borders")
+        use_inpainting_button = gr.Button("Use segment for inpainting")
         use_button = gr.Button("Use segment for 3D")
-
+    with gr.Tab("Inpainting"):
+        # with gr.Row(equal_height=True):
+        #     with gr.Column(scale=5):
+        #         inpainting_input = gr.ImageEditor(type="pil", label="Segment for inpainting")
+        #     with gr.Column(scale=5):
+        #         inpainting_text_input = gr.Textbox(label="Inpainting text")
+        #         inpainting_button = gr.Button("Inpainting")
+        #         use_inpainting_button2 = gr.Button("Back to editing")
+        #         use_button2 = gr.Button("Use segment for 3D")
+
+        with gr.Row():
+            session_id = gr.Textbox(value=str(uuid.uuid4()), visible=False)
+            with gr.Column():
+                model_picker = gr.Dropdown(
+                    list(inpainting_models.keys()),
+                    value=list(inpainting_models.keys())[0],
+                    label="Please select a model!",
+                )
+            with gr.Column():
+                use_painta = gr.Checkbox(value=True, label="Use PAIntA")
+                use_rasg = gr.Checkbox(value=True, label="Use RASG")
+        prompt = gr.Textbox(label="Inpainting Prompt")
+        with gr.Row():
+            with gr.Column():
+                # imageMask = gr.ImageMask(label="Input Image", brush_color='#ff0000', elem_id="inputmask", type="pil")
+                # imageMask = gr.ImageMask(label="Input Image", elem_id="inputmask", type="pil")
+                imageMask = gr.ImageEditor(
+                    label='Image',
+                    type='pil',
+                    sources=["upload", "webcam"],
+                    image_mode='RGB',
+                    layers=False,
+                    brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))
+                hr_image = gr.Image(visible=False, type="pil")
+
+                with gr.Row():
+                    inpaint_btn = gr.Button("Inpaint", scale=0)
+
+                with gr.Accordion('Advanced options', open=False):
+                    guidance_scale = gr.Slider(minimum=0, maximum=30, value=7.5, label="Guidance Scale")
+                    eta = gr.Slider(minimum=0, maximum=1, value=0.1, label="eta")
+                    ddim_steps = gr.Slider(minimum=10, maximum=100, value=50, step=1, label='Number of diffusion steps')
+                    with gr.Row():
+                        seed = gr.Number(value=49123, label="Seed")
+                        batch_size = gr.Number(value=1, label="Batch size", minimum=1, maximum=4)
+                    negative_prompt = gr.Textbox(value=negative_prompt_str, label="Negative prompt", lines=3)
+                    positive_prompt = gr.Textbox(value=positive_prompt_str, label="Positive prompt", lines=1)
+
+            with gr.Column():
+                with gr.Row():
+                    output_gallery = gr.Gallery(
+                        [],
+                        columns=4,
+                        preview=True,
+                        allow_preview=True,
+                        object_fit='scale-down',
+                        elem_id='outputgallery'
+                    )
+        inpaint_btn.click(
+            fn=inpainting_run,
+            inputs=[
+                model_picker,
+                use_rasg,
+                use_painta,
+                prompt,
+                imageMask,
+                hr_image,
+                seed,
+                eta,
+                negative_prompt,
+                positive_prompt,
+                ddim_steps,
+                guidance_scale,
+                batch_size,
+                session_id
+            ],
+            outputs=output_gallery,
+            api_name="inpaint"
+        )
+
     with gr.Tab("3D creation"):
         with gr.Row(equal_height=True):
             with gr.Column(scale=5):
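output_gallery is the target for inpainting_run's return value; gr.Gallery accepts a list of (media, caption) pairs, which is why the function returns [(result, 'label')]. A minimal illustration of that contract, with a placeholder file path:

    import gradio as gr

    with gr.Blocks() as demo:
        gallery = gr.Gallery([])
        btn = gr.Button("Fill")
        # A callback may return [(filepath_or_image, caption), ...]
        btn.click(lambda: [("example.png", "label")], outputs=gallery)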
@@ -301,11 +473,17 @@ with gr.Blocks() as demo:
     #tab2
     crop_button.click(autocrop_image, inputs=segment_input, outputs=segment_input)
     upscale_button.click(upscale, inputs=[segment_input, upscale_slider], outputs=segment_input)
-    rembg_button.click(rembg, inputs=segment_input, outputs=segment_input)
+    rembg_button.click(rembg, inputs=[segment_input, rembg_model_selector], outputs=segment_input)
     remove_background_button.click(remove_black_make_transparent, inputs=segment_input, outputs=segment_input)
     add_transparent_border_button.click(add_transparent_border, inputs=[segment_input, add_transparent_border_slider], outputs=segment_input)
+    # use_inpainting_button.click(return_image, inputs=segment_input, outputs=inpainting_input)
     use_button.click(return_image, inputs=segment_input, outputs=segment_3d_input)
 
+    #tab3
+    # inpainting_button.click(inpainting, inputs=inpainting_input, outputs=inpainting_input)
+    # use_inpainting_button2.click(return_image, inputs=inpainting_input, outputs=segment_input)
+    # use_button2.click(return_image, inputs=inpainting_input, outputs=segment_3d_input)
+
     #3d buttons
     hunyuan_button.click(generate_3d_model, inputs=[segment_3d_input, rembg_Hunyuan], outputs=trellis_output)
     hunyuan_button_texture.click(generate_3d_model_texture, inputs=[segment_3d_input, rembg_Hunyuan], outputs=trellis_output)
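One caveat carried over from rembg into inpainting_run: each call writes NamedTemporaryFile(delete=False) PNGs that are never unlinked, so they accumulate in the Space's temp directory. A sketch of the same upload pattern with explicit cleanup, assuming the file is no longer needed once predict returns:

    import os
    import tempfile

    from gradio_client import handle_file

    def predict_with_file(client, pil_image, **kwargs):
        # Write the PIL image to a named file that gradio_client can upload.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
            pil_image.save(f.name)
        try:
            return client.predict(file=handle_file(f.name), **kwargs)
        finally:
            os.unlink(f.name)  # avoid leaking one PNG per request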