Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -9,15 +9,6 @@ from src.editor import ImageEditorDemo
 import spaces
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-scheduler_class = MyEulerAncestralDiscreteScheduler
-
-pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True).to(device)
-pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True).to(device)
-pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
-pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
-pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
-
-
 # if torch.cuda.is_available():
 # torch.cuda.max_memory_allocated(device=device)
 # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
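This hunk drops the module-level setup: both SDXL-Turbo pipelines, the scheduler swap, and the .to(device) moves were running at import time, while the Space was still starting up. The same lines reappear inside the @spaces.GPU-decorated set_pipe handler in the next hunk, so the heavy from_pretrained calls are deferred until a user actually submits an input. Below is a minimal sketch of that deferred-loading pattern with a cache added so the weights are only loaded once; the cache and every name in it (_PIPE_CACHE, get_inference_pipe, run_edit) are illustrative and not part of this commit, which simply rebuilds the pipelines on each set_pipe call.

# Sketch only: defer pipeline construction into a @spaces.GPU handler.
# _PIPE_CACHE / get_inference_pipe / run_edit are hypothetical names.
import spaces
import torch
from diffusers import AutoPipelineForImage2Image

device = "cuda" if torch.cuda.is_available() else "cpu"
_PIPE_CACHE = {}  # loaded once, reused on later calls


def get_inference_pipe():
    # Build the image-to-image pipeline lazily instead of at import time.
    if "inference" not in _PIPE_CACHE:
        _PIPE_CACHE["inference"] = AutoPipelineForImage2Image.from_pretrained(
            "stabilityai/sdxl-turbo", use_safetensors=True
        ).to(device)
    return _PIPE_CACHE["inference"]


@spaces.GPU
def run_edit(image, prompt):
    pipe = get_inference_pipe()
    # num_inference_steps / strength are placeholders, not the demo's settings
    return pipe(prompt=prompt, image=image,
                num_inference_steps=4, strength=0.5).images[0]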
@@ -65,13 +56,24 @@ with gr.Blocks(css="style.css") as demo:
 @spaces.GPU
 def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
              num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
+
+    scheduler_class = MyEulerAncestralDiscreteScheduler
+
+    pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True).to(device)
+    pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True).to(device)
+    pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
+    pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
+    pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
+
+
     config = RunConfig(num_inference_steps=num_inference_steps,
                        num_inversion_steps=num_inversion_steps,
                        edit_guidance_scale=edit_guidance_scale,
                        inversion_max_step=inversion_max_step)
-
+    image_editor = ImageEditorDemo(pipe_inversion, pipe_inference, input_image,
                                    description_prompt, config, device,
-                                   [rnri_iterations, rnri_alpha, rnri_lr])
+                                   [rnri_iterations, rnri_alpha, rnri_lr])
+    return image_editor, "Input has set!"
 
 @spaces.GPU
 def edit(editor, target_prompt):
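For context (not part of this commit), the two return values of set_pipe, the ImageEditorDemo object and the "Input has set!" status string, suggest the handlers are wired through a gr.State inside the gr.Blocks(css="style.css") context named in the hunk header, with edit(editor, target_prompt) reading the editor back out of that State. A rough sketch of that wiring; every component name below (editor_state, status, set_button, edit_button, and so on) is assumed rather than taken from app.py, and it presumes set_pipe and edit are in scope.

# Sketch only: plausible Blocks wiring for the handlers above.
# Assumes set_pipe and edit from app.py are in scope; all component names are guesses.
import gradio as gr

with gr.Blocks(css="style.css") as demo:
    input_image = gr.Image(type="pil", label="Input image")
    description_prompt = gr.Textbox(label="Describe the input image")
    edit_guidance_scale = gr.Slider(1.0, 15.0, value=1.2, label="Edit guidance scale")
    status = gr.Textbox(label="Status", interactive=False)
    editor_state = gr.State()          # carries the ImageEditorDemo between calls
    target_prompt = gr.Textbox(label="Target prompt")
    output_image = gr.Image(label="Edited image")
    set_button = gr.Button("Set input")
    edit_button = gr.Button("Edit")

    # set_pipe returns (image_editor, "Input has set!") -> [editor_state, status];
    # edit(editor, target_prompt) then reads the editor back out of the State.
    set_button.click(set_pipe,
                     inputs=[input_image, description_prompt, edit_guidance_scale],
                     outputs=[editor_state, status])
    edit_button.click(edit,
                      inputs=[editor_state, target_prompt],
                      outputs=output_image)

demo.launch()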