update state initialization
app.py (CHANGED)
```diff
@@ -27,14 +27,14 @@ vqgan.eval()
 processor = ProcessorGradientFlow(device=device)
 clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
 clip.to(device)
-promptoptim = ImagePromptOptimizer(vqgan, clip, processor, quantize=True)
-state = ImageState(vqgan, promptoptim)
 def set_img_from_example(img):
     return state.update_images(img, img, 0)
 def get_cleared_mask():
     return gr.Image.update(value=None)
     # mask.clear()
 with gr.Blocks(css="styles.css") as demo:
+    promptoptim = ImagePromptOptimizer(vqgan, clip, processor, quantize=True)
+    state = ImageState(vqgan, promptoptim)
     with gr.Row():
         with gr.Column(scale=1):
             blue_eyes = gr.Slider(
@@ -167,14 +167,10 @@ with gr.Blocks(css="styles.css") as demo:
     lip_size.change(state.apply_lip_vector, inputs=[lip_size], outputs=[out, mask])
     # hair_green_purple.change(state.apply_gp_vector, inputs=[hair_green_purple], outputs=[out, mask])
     blue_eyes.change(state.apply_rb_vector, inputs=[blue_eyes], outputs=[out, mask])
-
     blend_weight.change(state.blend, inputs=[blend_weight], outputs=[out, mask])
     # requantize.change(state.update_requant, inputs=[requantize], outputs=[out, mask])
-
-
     base_img.change(state.update_images, inputs=[base_img, blend_img, blend_weight], outputs=[out, mask])
     blend_img.change(state.update_images, inputs=[base_img, blend_img, blend_weight], outputs=[out, mask])
-
     small_local.click(set_small_local, outputs=[iterations, learning_rate, lpips_weight, reconstruction_steps])
     major_local.click(set_major_local, outputs=[iterations, learning_rate, lpips_weight, reconstruction_steps])
     major_global.click(set_major_global, outputs=[iterations, learning_rate, lpips_weight, reconstruction_steps])
```
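The change moves the construction of `promptoptim` and `state` from module level into the `with gr.Blocks(...)` context, so the event handlers registered inside the block close over objects created there. Below is a minimal sketch of that pattern, not the Space's actual code: the `ImageState` class here is a hypothetical stand-in (the real one wraps the VQGAN model and the prompt optimizer), and only the Gradio wiring mirrors `app.py`.

```python
import gradio as gr

class ImageState:
    """Hypothetical stand-in for the app's ImageState; the real class
    wraps the VQGAN model and the CLIP-guided prompt optimizer."""

    def update_images(self, base_img, blend_img, blend_weight):
        # The real method re-encodes both images and blends their latents;
        # this sketch just echoes the base image back to the output.
        return base_img

with gr.Blocks() as demo:
    # State is constructed inside the Blocks context, as in the commit,
    # so the .change() handlers below close over this instance.
    state = ImageState()
    with gr.Row():
        base_img = gr.Image(label="base")
        blend_img = gr.Image(label="blend")
        out = gr.Image(label="result")
    blend_weight = gr.Slider(minimum=0, maximum=1, value=0, label="blend weight")
    # Re-render whenever either source image changes, mirroring app.py.
    base_img.change(state.update_images,
                    inputs=[base_img, blend_img, blend_weight], outputs=[out])
    blend_img.change(state.update_images,
                     inputs=[base_img, blend_img, blend_weight], outputs=[out])

if __name__ == "__main__":
    demo.launch()
```

Note that a single `state` object created this way is still shared across all sessions of the Space; per-session state would require `gr.State`. The commit keeps the shared-object design and only changes where the object is constructed.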