tsqn committed · Commit 7ba92d3 (verified) · 1 parent: 96cc85c

Update app.py

Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -58,10 +58,10 @@ pipe = CogVideoXPipeline.from_pretrained(
     transformer=transformer,
     vae=vae,
     torch_dtype=torch.bfloat16
-).to(device)
+).to("cpu")
 pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 
-# pipe.enable_model_cpu_offload()
+pipe.enable_model_cpu_offload()
 pipe.vae.enable_tiling()
 pipe.vae.enable_slicing()
 
@@ -261,9 +261,9 @@ def infer(
         use_dynamic_cfg=True,
         output_type="pt",
         guidance_scale=guidance_scale,
-        generator=torch.Generator(device=device).manual_seed(seed),
+        generator=torch.Generator(device="cpu").manual_seed(seed),
     ).frames
-    pipe_video.to(device)
+    pipe_video.to("cpu")
     del pipe_video
     gc.collect()
     torch.cuda.empty_cache()
@@ -287,9 +287,9 @@ def infer(
         use_dynamic_cfg=True,
         output_type="pt",
         guidance_scale=guidance_scale,
-        generator=torch.Generator(device=device).manual_seed(seed),
+        generator=torch.Generator(device="cpu").manual_seed(seed),
     ).frames
-    pipe_image.to(device)
+    pipe_image.to("cpu")
     del pipe_image
     gc.collect()
     torch.cuda.empty_cache()
@@ -303,9 +303,9 @@ def infer(
         use_dynamic_cfg=True,
         output_type="pt",
         guidance_scale=guidance_scale,
-        generator=torch.Generator(device=device).manual_seed(seed),
+        generator=torch.Generator(device="cpu").manual_seed(seed),
     ).frames
-    pipe.to(device)
+    pipe.to("cpu")
     gc.collect()
     return (video_pt, seed)
 
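For context, a minimal sketch of the offload pattern this commit switches to: keep the pipeline weights on the CPU, let enable_model_cpu_offload() stage each submodule onto the GPU only while it runs, and seed the generator on the CPU so results stay reproducible regardless of device placement. The model id and prompt below are placeholders for illustration; app.py assembles its pipeline from preloaded transformer/vae components instead.

import torch
from diffusers import CogVideoXPipeline, CogVideoXDPMScheduler

# Hypothetical checkpoint id; the Space builds its pipeline from locally
# loaded transformer/vae components rather than pulling a full checkpoint.
pipe = CogVideoXPipeline.from_pretrained(
    "THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16
).to("cpu")

pipe.scheduler = CogVideoXDPMScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)

# Move each submodule to the GPU only for its forward pass, then back to CPU,
# trading some speed for a much lower peak VRAM footprint.
pipe.enable_model_cpu_offload()
pipe.vae.enable_tiling()
pipe.vae.enable_slicing()

# A CPU-side generator keeps seeding independent of where a module currently lives.
frames = pipe(
    prompt="a panda playing guitar",  # placeholder prompt
    guidance_scale=6.0,
    use_dynamic_cfg=True,
    output_type="pt",
    generator=torch.Generator(device="cpu").manual_seed(42),
).frames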