Hugging Face Space (running on ZeroGPU)

Commit: "Try with 121 frames + touch-ups"
Files changed: app.py (+5 −5), optimization.py (+1 −1)

## app.py (CHANGED)
```diff
@@ -25,7 +25,7 @@ MAX_SEED = np.iinfo(np.int32).max
 
 FIXED_FPS = 24
 MIN_FRAMES_MODEL = 8
-MAX_FRAMES_MODEL = [old value truncated in extraction]
+MAX_FRAMES_MODEL = 121
 
 MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS,1)
 MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS,1)
@@ -56,7 +56,7 @@ optimize_pipeline_(pipe,
 
 
 default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
-default_negative_prompt = "[old value truncated in extraction]
+default_negative_prompt = "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走"
 
 
 def resize_image(image: Image.Image) -> Image.Image:
@@ -100,8 +100,8 @@ def generate_video(
     prompt,
     negative_prompt=default_negative_prompt,
     duration_seconds = MAX_DURATION,
-    guidance_scale = [old value truncated in extraction]
-    steps = [old value truncated in extraction]
+    guidance_scale = 3.5,
+    steps = 28,
     seed = 42,
     randomize_seed = False,
     progress=gr.Progress(track_tqdm=True),
@@ -184,7 +184,7 @@ with gr.Blocks() as demo:
     negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
     seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
     randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
-    steps_slider = gr.Slider(minimum=1, maximum=[old value truncated in extraction]
+    steps_slider = gr.Slider(minimum=1, maximum=40, step=1, value=28, label="Inference Steps")
     guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
 
     generate_button = gr.Button("Generate Video", variant="primary")
```

NOTE(review): the removed-line values marked "[old value truncated in extraction]" were cut off in the scraped page; the original pre-commit values are not recoverable from this view.
## optimization.py (CHANGED)

```diff
@@ -20,7 +20,7 @@ from optimization_utils import ZeroGPUCompiledModel
 
 P = ParamSpec('P')
 
 
-TRANSFORMER_NUM_FRAMES_DIM = torch.export.Dim('num_frames', min=3, max=[old value truncated in extraction]
+TRANSFORMER_NUM_FRAMES_DIM = torch.export.Dim('num_frames', min=3, max=31)
 
 TRANSFORMER_DYNAMIC_SHAPES = {
     'hidden_states': {
```

NOTE(review): the removed line's `max=` value was truncated in the scraped page and is not recoverable from this view.