Update app.py
app.py CHANGED
@@ -261,7 +261,7 @@ def generate_video_from_text_90(
             "media_items": None,
         }
 
-        generator = torch.Generator(device="
+        generator = torch.Generator(device="cpu").manual_seed(seed)
 
         def gradio_progress_callback(self, step, timestep, kwargs):
             progress((step + 1) / num_inference_steps)
@@ -319,7 +319,7 @@ def generate_video_from_image_90(
     frame_rate=20,
     seed=random.randint(0, MAX_SEED),
     num_inference_steps=35,
-    guidance_scale=
+    guidance_scale=4.2,
     height=768,
     width=768,
     num_frames=60,
@@ -405,7 +405,7 @@ def create_advanced_options():
     with gr.Accordion("Step 4: Advanced Options (Optional)", open=False):
         seed = gr.Slider(label="4.1 Seed", minimum=0, maximum=1000000, step=1, value=646373)
         inference_steps = gr.Slider(label="4.2 Inference Steps", minimum=5, maximum=150, step=5, value=40)
-        guidance_scale = gr.Slider(label="4.3 Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=
+        guidance_scale = gr.Slider(label="4.3 Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=4.2)
 
         height_slider = gr.Slider(
             label="4.4 Height",