fix timesteps
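Reset `skip_timesteps` to 0 when no initial image is uploaded, and update the `init_scale` and `skip_timesteps` sliders' defaults and labels.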
app.py CHANGED
```diff
@@ -162,6 +162,7 @@ def inference(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, r
         init = Image.fromarray(init_image).convert('RGB')
         init = init.resize((side_x, side_y), Image.LANCZOS)
         init = TF.to_tensor(init).to(device).unsqueeze(0).mul(2).sub(1)
+    else: skip_timesteps = 0
     cur_t = None
     def cond_fn(x, t, y=None):
         with torch.enable_grad():
```
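The new `else` branch is the whole fix: `skip_timesteps` tells the sampler to start partway through the diffusion schedule and blend from `init`, so a leftover slider value with no uploaded image would silently truncate generation. A minimal sketch of the patched control flow, assuming the enclosing `if init_image is not None:` implied by the context lines' indentation (the helper name `prepare_init` is hypothetical; in app.py this logic is inline in `inference`):

```python
from PIL import Image
import torchvision.transforms.functional as TF


def prepare_init(init_image, skip_timesteps, side_x, side_y, device):
    # Hypothetical helper; in app.py this code is inline in inference().
    init = None
    if init_image is not None:
        # Convert the upload to a [-1, 1] tensor at the model's resolution.
        init = Image.fromarray(init_image).convert('RGB')
        init = init.resize((side_x, side_y), Image.LANCZOS)
        init = TF.to_tensor(init).to(device).unsqueeze(0).mul(2).sub(1)
    else:
        # The fix: with no init image there is nothing to blend from,
        # so force a full run instead of skipping part of the schedule.
        skip_timesteps = 0
    return init, skip_timesteps
```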
```diff
@@ -246,8 +247,8 @@ with demo:
     # with gr.Group():
     with gr.Column():
         init_image = gr.Image(source="upload", label='initial image (optional)')
-        init_scale = gr.Slider(minimum=0, maximum=
-        skip_timesteps = gr.Slider(minimum=0, maximum=100, step=1, value=
+        init_scale = gr.Slider(minimum=0, maximum=10000, step=100, value=1000, label="Look like the image above")
+        skip_timesteps = gr.Slider(minimum=0, maximum=100, step=1, value=30, label="Style strength")
     # with gr.Group():
     with gr.Column():
         image_prompts = gr.Image(source="upload", label='image prompt (optional)')
```
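The second hunk updates the two sliders that feed those parameters (the removed lines are cut off in the rendered diff, so only the new definitions are visible in full). In samplers of this style, a larger `skip_timesteps` starts denoising later in the schedule and so preserves more of the uploaded image. A standalone sketch of the rebuilt column, assuming `demo` is a `gr.Blocks` context as the hunk header's `with demo:` suggests:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Column():
        init_image = gr.Image(source="upload", label='initial image (optional)')
        # New defaults from this commit: init_scale weights how strongly
        # samples are pulled toward the upload; skip_timesteps skips the
        # first part of the schedule so more of the upload survives.
        init_scale = gr.Slider(minimum=0, maximum=10000, step=100,
                               value=1000, label="Look like the image above")
        skip_timesteps = gr.Slider(minimum=0, maximum=100, step=1,
                                   value=30, label="Style strength")
```

Both components are then passed, along with the other controls, as inputs to `inference`, whose (truncated) signature appears in the first hunk header.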