ovi054 committed · Commit 7dcfa41 · verified · Parent(s): 85bbc23

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -15,7 +15,7 @@ import spaces
 model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
 vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
 pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
-flow_shift = 5.0  # 5.0 for 720P, 3.0 for 480P
+flow_shift = 1.0  # was 5.0; 1.0 for image, 5.0 for 720P, 3.0 for 480P
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
 
 
@@ -38,7 +38,7 @@ def generate(prompt, negative_prompt, width=1024, height=1024, num_inference_ste
         width=width,
         num_frames=1,
         num_inference_steps=num_inference_steps,
-        guidance_scale=5.0,
+        guidance_scale=1.0,  # was 5.0
     )
     image = output.frames[0][0]
     image = (image * 255).astype(np.uint8)
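
For context, a minimal self-contained sketch of the single-frame (image) generation path that app.py sets up after this commit. It mirrors the lines shown in the diff; the prompt, step count, and output filename are illustrative placeholders, and a CUDA device is assumed.

import numpy as np
import torch
from PIL import Image
from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler

model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"

# VAE stays in float32 for numerical stability; the rest of the pipeline runs in bfloat16.
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)

# As in the commit: 1.0 for single-frame image output, 5.0 for 720P video, 3.0 for 480P video.
flow_shift = 1.0
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
pipe.to("cuda")  # assumes a CUDA device is available

output = pipe(
    prompt="a watercolor fox in a snowy forest",  # placeholder prompt
    negative_prompt="",
    height=1024,
    width=1024,
    num_frames=1,                # a single frame, so the T2V pipeline yields a still image
    num_inference_steps=30,      # placeholder step count
    guidance_scale=1.0,          # lowered from 5.0 in this commit
)

# output.frames holds one "video" of one frame with float values in [0, 1].
frame = output.frames[0][0]
image = Image.fromarray((frame * 255).astype(np.uint8))
image.save("output.png")         # placeholder filename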