rahul7star committed on
Commit 8c45c6d · verified · 1 Parent(s): 30fb3f4

Create app_two_lora.py

Files changed (1)
  1. app_two_lora.py +315 -0
app_two_lora.py ADDED
@@ -0,0 +1,315 @@
+ import torch
+ from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler
+ from diffusers.utils import export_to_video
+ import gradio as gr
+ import tempfile
+ import spaces
+ from huggingface_hub import hf_hub_download
+ import numpy as np
+ import random
+ import os
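+ # `spaces` provides the Hugging Face ZeroGPU integration (the @spaces.GPU decorator below).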
+ # Lightweight 1.3B alternative (commented out):
+ # MODEL_ID = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
+ # LORA_REPO_ID = "Kijai/WanVideo_comfy"
+ # LORA_FILENAME = "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors"
+
+ MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+
+ # Primary LoRA: Lightx2v step-distillation LoRA for fast, low-step sampling
+ LORA_REPO_ID = "Kijai/WanVideo_comfy"
+ LORA_FILENAME = "Lightx2v/lightx2v_T2V_14B_cfg_step_distill_v2_lora_rank256_bf16.safetensors"
+ # LORA_FILENAME = "Pusa/Wan21_PusaV1_LoRA_14B_rank512_bf16.safetensors"
+
+ # Secondary LoRA: FusionX FaceNaturalizer
+ LORA_TWO = "moonshotmillion/Wan_FusionX_FaceNaturalizer"
+ LORA_TWO_FILE = "FusionX_FaceNaturalizer.safetensors"
+
+ # Other LoRA options (commented out):
+ # LORA_REPO_ID = "RaphaelLiu/PusaV1"
+ # LORA_FILENAME = "pusa_v1.safetensors"
+ # LORA_REPO_ID = "Kijai/WanVideo_comfy"
+ # LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
+
+ # Load the VAE in float32 for numerical stability; the rest of the pipeline runs in bfloat16
+ vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
+ pipe = WanPipeline.from_pretrained(
+     MODEL_ID, vae=vae, torch_dtype=torch.bfloat16
+ )
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
+ pipe.to("cuda")
+
+ # Download the primary (step-distillation) LoRA
+ causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
+
+ # Load one more LoRA (the FaceNaturalizer)
+ lora2 = hf_hub_download(repo_id=LORA_TWO, filename=LORA_TWO_FILE)
+
+ pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
+ pipe.load_lora_weights(lora2, adapter_name="face_naturalizer_lora")
+ pipe.set_adapters(["causvid_lora", "face_naturalizer_lora"], adapter_weights=[0.95, 0.85])
+ pipe.fuse_lora()
+
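+ # fuse_lora() bakes the weighted adapters into the base weights, so there is no
+ # per-step adapter overhead. A minimal sketch of reverting to the base model,
+ # assuming diffusers' standard LoRA API (not exercised by this app):
+ # pipe.unfuse_lora()
+ # pipe.unload_lora_weights()
+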
+ # Previous constants (superseded by the high-res settings below):
+ # MOD_VALUE = 32
+ # DEFAULT_H_SLIDER_VALUE = 512
+ # DEFAULT_W_SLIDER_VALUE = 896
+
+ # # Environment variable check
+ # IS_ORIGINAL_SPACE = os.environ.get("IS_ORIGINAL_SPACE", "True") == "True"
+
+ # # Original limits
+ # ORIGINAL_SLIDER_MIN_H, ORIGINAL_SLIDER_MAX_H = 128, 1280
+ # ORIGINAL_SLIDER_MIN_W, ORIGINAL_SLIDER_MAX_W = 128, 1280
+ # ORIGINAL_MAX_DURATION = round(81/24, 1)  # MAX_FRAMES_MODEL/FIXED_FPS
+
+ # # Limited space constants
+ # LIMITED_MAX_RESOLUTION = 640
+ # LIMITED_MAX_DURATION = 2.0
+ # LIMITED_MAX_STEPS = 4
+
+ # # Set limits based on environment variable
+ # if IS_ORIGINAL_SPACE:
+ #     SLIDER_MIN_H, SLIDER_MAX_H = 128, LIMITED_MAX_RESOLUTION
+ #     SLIDER_MIN_W, SLIDER_MAX_W = 128, LIMITED_MAX_RESOLUTION
+ #     MAX_DURATION = LIMITED_MAX_DURATION
+ #     MAX_STEPS = LIMITED_MAX_STEPS
+ # else:
+ #     SLIDER_MIN_H, SLIDER_MAX_H = ORIGINAL_SLIDER_MIN_H, ORIGINAL_SLIDER_MAX_H
+ #     SLIDER_MIN_W, SLIDER_MAX_W = ORIGINAL_SLIDER_MIN_W, ORIGINAL_SLIDER_MAX_W
+ #     MAX_DURATION = ORIGINAL_MAX_DURATION
+ #     MAX_STEPS = 8
+
+ # MAX_SEED = np.iinfo(np.int32).max
+
+ # FIXED_FPS = 24
+ # FIXED_OUTPUT_FPS = 18  # we downspeed the output video as a temporary "trick"
+ # MIN_FRAMES_MODEL = 8
+ # MAX_FRAMES_MODEL = 81
+
+ # New math to make the defaults high-res
+
+ MOD_VALUE = 32
+
+ # Defaults for higher-res generation
+ DEFAULT_H_SLIDER_VALUE = 768
+ DEFAULT_W_SLIDER_VALUE = 1344  # roughly 16:9 (exactly 7:4) and divisible by MOD_VALUE
+
+ # Original Space = Hugging Face space with compute limits
+ IS_ORIGINAL_SPACE = os.environ.get("IS_ORIGINAL_SPACE", "True") == "True"
+
+ # Conservative limits for low-end environments
+ LIMITED_MAX_RESOLUTION = 640
+ LIMITED_MAX_DURATION = 2.0
+ LIMITED_MAX_STEPS = 4
+
+ # Generous limits for local or Pro spaces
+ ORIGINAL_SLIDER_MIN_H, ORIGINAL_SLIDER_MAX_H = 128, 1536
+ ORIGINAL_SLIDER_MIN_W, ORIGINAL_SLIDER_MAX_W = 128, 1536
+ ORIGINAL_MAX_DURATION = round(81 / 24, 1)  # 3.4 seconds
+ ORIGINAL_MAX_STEPS = 8
+
+ # Use limited or original (generous) settings
+ if IS_ORIGINAL_SPACE:
+     SLIDER_MIN_H, SLIDER_MAX_H = 128, LIMITED_MAX_RESOLUTION
+     SLIDER_MIN_W, SLIDER_MAX_W = 128, LIMITED_MAX_RESOLUTION
+     MAX_DURATION = LIMITED_MAX_DURATION
+     MAX_STEPS = LIMITED_MAX_STEPS
+ else:
+     SLIDER_MIN_H, SLIDER_MAX_H = ORIGINAL_SLIDER_MIN_H, ORIGINAL_SLIDER_MAX_H
+     SLIDER_MIN_W, SLIDER_MAX_W = ORIGINAL_SLIDER_MIN_W, ORIGINAL_SLIDER_MAX_W
+     MAX_DURATION = ORIGINAL_MAX_DURATION
+     MAX_STEPS = ORIGINAL_MAX_STEPS
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ FIXED_FPS = 24
+ FIXED_OUTPUT_FPS = 18  # reduce final video FPS to save space
+ MIN_FRAMES_MODEL = 8
+ MAX_FRAMES_MODEL = 81
+
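+ # Note: the == "True" comparison above means setting the IS_ORIGINAL_SPACE env
+ # var to any other value (e.g. "False" in a duplicated space) lifts these limits.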
+
+ default_prompt_t2v = "cinematic footage, group of pedestrians dancing in the streets of NYC, high quality breakdance, 4K, tiktok video, intricate details, instagram feel, dynamic camera, smooth dance motion, dimly lit, stylish, beautiful faces, smiling, music video"
+ default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
+
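+ # @spaces.GPU accepts a callable for `duration`: it is called with the same
+ # arguments as the decorated function and returns a GPU time budget in seconds.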
+ def get_duration(prompt, height, width,
+                  negative_prompt, duration_seconds,
+                  guidance_scale, steps,
+                  seed, randomize_seed,
+                  progress):
+     if steps > 4 and duration_seconds > 2:
+         return 90
+     elif steps > 4 or duration_seconds > 2:
+         return 75
+     else:
+         return 60
+
+ @spaces.GPU(duration=get_duration)
+ def generate_video(prompt, height, width,
+                    negative_prompt=default_negative_prompt, duration_seconds=2,
+                    guidance_scale=1, steps=4,
+                    seed=42, randomize_seed=False,
+                    progress=gr.Progress(track_tqdm=True)):
+     """
+     Generate a video from a text prompt using the Wan 2.1 T2V model with fused LoRAs.
+
+     This function takes a text prompt and generates a video based on the provided
+     prompt and parameters. It uses the Wan 2.1 14B Text-to-Video model with a
+     step-distillation LoRA (plus the FaceNaturalizer LoRA) for fast generation
+     in 4-8 steps.
+
+     Args:
+         prompt (str): Text prompt describing the desired video content.
+         height (int): Target height for the output video. Will be adjusted to a multiple of MOD_VALUE (32).
+         width (int): Target width for the output video. Will be adjusted to a multiple of MOD_VALUE (32).
+         negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
+             Defaults to default_negative_prompt (lists unwanted visual artifacts).
+         duration_seconds (float, optional): Duration of the generated video in seconds.
+             Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
+         guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
+             Defaults to 1.0. Range: 0.0-20.0.
+         steps (int, optional): Number of inference steps. More steps = higher quality but slower.
+             Defaults to 4. Range: 1 to MAX_STEPS (4 in limited spaces, 8 otherwise).
+         seed (int, optional): Random seed for reproducible results. Defaults to 42.
+             Range: 0 to MAX_SEED (2147483647).
+         randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
+             Defaults to False.
+         progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
+
+     Returns:
+         tuple: A tuple containing:
+             - video_path (str): Path to the generated video file (.mp4)
+             - current_seed (int): The seed used for generation (useful when randomize_seed=True)
+
+     Raises:
+         gr.Error: If the prompt is empty or None.
+
+     Note:
+         - Frame count is calculated as duration_seconds * FIXED_FPS (24)
+         - Output dimensions are adjusted to be multiples of MOD_VALUE (32)
+         - The function uses GPU acceleration via the @spaces.GPU decorator
+         - Generation time varies with steps and duration (see get_duration)
+     """
+     if not prompt or prompt.strip() == "":
+         raise gr.Error("Please enter a text prompt. Try to use long and precise descriptions.")
+
+     # Apply limits based on the environment variable
+     if IS_ORIGINAL_SPACE:
+         height = min(height, LIMITED_MAX_RESOLUTION)
+         width = min(width, LIMITED_MAX_RESOLUTION)
+         duration_seconds = min(duration_seconds, LIMITED_MAX_DURATION)
+         steps = min(steps, LIMITED_MAX_STEPS)
+
+     # Snap requested dimensions down to the nearest multiple of MOD_VALUE
+     target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
+     target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
+
+     num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
+
+     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+
+     with torch.inference_mode():
+         output_frames_list = pipe(
+             prompt=prompt, negative_prompt=negative_prompt,
+             height=target_h, width=target_w, num_frames=num_frames,
+             guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
+             generator=torch.Generator(device="cuda").manual_seed(current_seed)
+         ).frames[0]
+
+     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
+         video_path = tmpfile.name
+     export_to_video(output_frames_list, video_path, fps=FIXED_OUTPUT_FPS)
+     return video_path, current_seed
+
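+ # Worked example of the frame math above: a 2 s request yields
+ # round(2 * 24) = 48 frames, inside the model's [8, 81] range; requests past
+ # 81 / 24 = 3.375 s are clamped to 81 frames.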
+
+
+ with gr.Blocks(css="body { max-width: 100vw; overflow-x: hidden; }") as demo:
+     gr.HTML('<meta name="viewport" content="width=device-width, initial-scale=1">')
+     gr.Markdown("# ⚡ InstaVideo")
+     gr.Markdown("This Gradio space is a fork of [wan2-1-fast from multimodalart](https://huggingface.co/spaces/multimodalart/wan2-1-fast), and is powered by the Lightx2v step-distillation LoRA [from Kijai's WanVideo_comfy](https://huggingface.co/Kijai/WanVideo_comfy) together with the [FusionX FaceNaturalizer LoRA](https://huggingface.co/moonshotmillion/Wan_FusionX_FaceNaturalizer).")
+
+     # Show a notice when running with the limited public-space settings
+     if IS_ORIGINAL_SPACE:
+         gr.Markdown("⚠️ **This free public demo limits the resolution to 640px, duration to 2s, and inference steps to 4. For full capabilities please duplicate this space.**")
+
+     with gr.Row():
+         with gr.Column():
+             prompt_input = gr.Textbox(label="Prompt", value=default_prompt_t2v, placeholder="Describe the video you want to generate...")
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
+                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
+                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
+                 with gr.Row():
+                     height_input = gr.Slider(
+                         minimum=SLIDER_MIN_H,
+                         maximum=SLIDER_MAX_H,
+                         step=MOD_VALUE,
+                         value=min(DEFAULT_H_SLIDER_VALUE, SLIDER_MAX_H),
+                         label=f"Output Height (multiple of {MOD_VALUE})"
+                     )
+                     width_input = gr.Slider(
+                         minimum=SLIDER_MIN_W,
+                         maximum=SLIDER_MAX_W,
+                         step=MOD_VALUE,
+                         value=min(DEFAULT_W_SLIDER_VALUE, SLIDER_MAX_W),
+                         label=f"Output Width (multiple of {MOD_VALUE})"
+                     )
+                 duration_seconds_input = gr.Slider(
+                     minimum=round(MIN_FRAMES_MODEL / FIXED_FPS, 1),
+                     maximum=MAX_DURATION,
+                     step=0.1,
+                     value=2,
+                     label="Duration (seconds)",
+                     info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
+                 )
+                 steps_slider = gr.Slider(minimum=1, maximum=MAX_STEPS, step=1, value=4, label="Inference Steps")
+                 guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
+
+             generate_button = gr.Button("Generate Video", variant="primary")
+         with gr.Column():
+             video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
+
+     ui_inputs = [
+         prompt_input, height_input, width_input,
+         negative_prompt_input, duration_seconds_input,
+         guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
+     ]
+     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
+
+     # Adjust examples based on space limits
+     example_configs = [
+         ["a majestic eagle soaring through mountain peaks, cinematic aerial view", 896, 512],
+         ["a serene ocean wave crashing on a sandy beach at sunset", 448, 832],
+         ["a field of flowers swaying in the wind, spring morning light", 512, 896],
+     ]
+
+     if IS_ORIGINAL_SPACE:
+         # Clamp example resolutions for limited spaces
+         example_configs = [
+             [example[0], min(example[1], LIMITED_MAX_RESOLUTION), min(example[2], LIMITED_MAX_RESOLUTION)]
+             for example in example_configs
+         ]
+
+     gr.Examples(
+         examples=example_configs,
+         inputs=[prompt_input, height_input, width_input],
+         outputs=[video_output, seed_input],
+         fn=generate_video,
+         cache_examples="lazy"
+     )
+
+ if __name__ == "__main__":
+     demo.queue().launch()