Update handler.py
handler.py CHANGED (+15 -12)
@@ -6,6 +6,8 @@ import logging
 import random
 import traceback
 import torch
+
+# note: there is no HunyuanImageToVideoPipeline yet in Diffusers
 from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
 from varnish import Varnish
 from varnish.utils import is_truthy, process_input_image
@@ -95,18 +97,19 @@ class EndpointHandler:
         )
 
         if support_image_prompt:
-            # Initialize image-to-video pipeline
-            self.image_to_video = HunyuanImageToVideoPipeline.from_pretrained(
-                path,
-                transformer=transformer,
-                torch_dtype=torch.float16,
-            ).to(self.device)
-
-            # Initialize components in appropriate precision
-            self.image_to_video.text_encoder = self.image_to_video.text_encoder.half()
-            self.image_to_video.text_encoder_2 = self.image_to_video.text_encoder_2.half()
-            self.image_to_video.transformer = self.image_to_video.transformer.to(torch.bfloat16)
-            self.image_to_video.vae = self.image_to_video.vae.half()
+            raise Exception("Please use a version of Diffusers that supports HunyuanImageToVideoPipeline")
+            # # Initialize image-to-video pipeline
+            # self.image_to_video = HunyuanImageToVideoPipeline.from_pretrained(
+            #     path,
+            #     transformer=transformer,
+            #     torch_dtype=torch.float16,
+            # ).to(self.device)
+            #
+            # # Initialize components in appropriate precision
+            # self.image_to_video.text_encoder = self.image_to_video.text_encoder.half()
+            # self.image_to_video.text_encoder_2 = self.image_to_video.text_encoder_2.half()
+            # self.image_to_video.transformer = self.image_to_video.transformer.to(torch.bfloat16)
+            # self.image_to_video.vae = self.image_to_video.vae.half()
         else:
             # Initialize text-to-video pipeline
             self.text_to_video = HunyuanVideoPipeline.from_pretrained(
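
A note on the change itself: the unconditional raise fires deep inside EndpointHandler's constructor. A minimal alternative sketch, assuming a future Diffusers release exports a class under the name this commit anticipates (HunyuanImageToVideoPipeline is the commit's placeholder name, not a confirmed Diffusers export), would feature-gate the import once at module load and keep the original initialization behind the check:

    import torch

    # Sketch only: feature-gate the optional pipeline class instead of
    # raising unconditionally. "HunyuanImageToVideoPipeline" is the name
    # this commit anticipates; it is an assumption, not a class current
    # Diffusers is known to export.
    try:
        from diffusers import HunyuanImageToVideoPipeline
    except ImportError:
        HunyuanImageToVideoPipeline = None


    def load_image_to_video(path, transformer, device):
        """Build the image-to-video pipeline, or fail with a clear message."""
        if HunyuanImageToVideoPipeline is None:
            raise RuntimeError(
                "Please use a version of Diffusers that supports HunyuanImageToVideoPipeline"
            )
        pipe = HunyuanImageToVideoPipeline.from_pretrained(
            path,
            transformer=transformer,
            torch_dtype=torch.float16,
        ).to(device)
        # Same precision split as the commented-out block: fp16 for the
        # text encoders and VAE, bf16 for the transformer.
        pipe.text_encoder = pipe.text_encoder.half()
        pipe.text_encoder_2 = pipe.text_encoder_2.half()
        pipe.transformer = pipe.transformer.to(torch.bfloat16)
        pipe.vae = pipe.vae.half()
        return pipe

With this shape, the support_image_prompt branch could call load_image_to_video(path, transformer, self.device), while text-to-video-only deployments are unaffected because the failed import is absorbed at module load rather than surfacing in the constructor.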
|