jbilcke-hf committed verified commit bab295a · Parent(s): 03f0c45

Update handler.py

Files changed (1): handler.py (+14 −13)
handler.py CHANGED

@@ -110,6 +110,12 @@ class EndpointHandler:
             # self.image_to_video.text_encoder_2 = self.image_to_video.text_encoder_2.half()
             # self.image_to_video.transformer = self.image_to_video.transformer.to(torch.bfloat16)
             # self.image_to_video.vae = self.image_to_video.vae.half()
+
+            # apply_enhance_a_video(self.image_to_video.transformer, EnhanceAVideoConfig(
+            #     weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
+            #     num_frames_callback=lambda: (config.num_frames - 1),
+            #     _attention_type=1
+            # ))
         else:
             # Initialize text-to-video pipeline
             self.text_to_video = HunyuanVideoPipeline.from_pretrained(
@@ -124,6 +130,14 @@ class EndpointHandler:
             self.text_to_video.transformer = self.text_to_video.transformer.to(torch.bfloat16)
             self.text_to_video.vae = self.text_to_video.vae.half()
 
+
+            # apply_enhance_a_video(self.text_to_video.transformer, EnhanceAVideoConfig(
+            #     # weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
+            #     weight=config.enhance_a_video_weight,
+            #     num_frames_callback=lambda: (config.num_frames - 1),
+            #     _attention_type=1
+            # ))
+
             # enable FasterCache
 
             # those values are coming from here:
@@ -336,21 +350,8 @@ class EndpointHandler:
                 config.input_image_quality,
             )
             generation_kwargs["image"] = processed_image
-
-            apply_enhance_a_video(self.image_to_video.transformer, EnhanceAVideoConfig(
-                weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
-                num_frames_callback=lambda: (config.num_frames - 1),
-                _attention_type=1
-            ))
-
             frames = self.image_to_video(**generation_kwargs).frames
         else:
-            apply_enhance_a_video(self.text_to_video.transformer, EnhanceAVideoConfig(
-                weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
-                num_frames_callback=lambda: (config.num_frames - 1),
-                _attention_type=1
-            ))
-
             frames = self.text_to_video(**generation_kwargs).frames
 
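Net effect of the commit: Enhance-A-Video is no longer applied inside the generation path on every request; equivalent calls now sit, commented out, at pipeline-initialization time. For reference, below is a minimal sketch of the pattern being moved, assembled only from names visible in the diff (apply_enhance_a_video, EnhanceAVideoConfig, weight, num_frames_callback, _attention_type). The import path, checkpoint id, and the standalone config values are assumptions for illustration; none of them appear in the hunks.

import torch
from diffusers import HunyuanVideoPipeline

# Assumed import path: the diff never shows where these helpers come from.
from enhance_a_video import apply_enhance_a_video, EnhanceAVideoConfig

# Hypothetical stand-ins for the handler's `config` object.
enable_enhance_a_video = True
enhance_a_video_weight = 4.0   # placeholder value, not from the diff
num_frames = 49                # placeholder value, not from the diff

# Assumed checkpoint id for the Diffusers HunyuanVideo weights.
pipe = HunyuanVideoPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", torch_dtype=torch.bfloat16
).to("cuda")

# The pattern the commit relocates: patch the transformer once, up front,
# instead of on every request. A weight of 0.0 keeps the patch inert when
# the feature is disabled.
apply_enhance_a_video(pipe.transformer, EnhanceAVideoConfig(
    weight=enhance_a_video_weight if enable_enhance_a_video else 0.0,
    num_frames_callback=lambda: (num_frames - 1),
    _attention_type=1,
))

frames = pipe(prompt="a cat walking on grass", num_frames=num_frames).frames

Applying the patch once at initialization avoids re-wrapping the transformer's attention on every request, which appears to be the motivation for moving (and, for now, disabling) these calls.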