Diffusers Bot
committed on
Upload folder using huggingface_hub
main/README.md
CHANGED
@@ -1487,17 +1487,16 @@ NOTE: The ONNX conversions and TensorRT engine build may take up to 30 minutes.
 ```python
 import torch
 from diffusers import DDIMScheduler
-from diffusers.pipelines
+from diffusers.pipelines import DiffusionPipeline

 # Use the DDIMScheduler scheduler here instead
-scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-2-1",
-                                          subfolder="scheduler")
+scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-2-1", subfolder="scheduler")

-pipe =
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1",
+    custom_pipeline="stable_diffusion_tensorrt_txt2img",
+    variant='fp16',
+    torch_dtype=torch.float16,
+    scheduler=scheduler,)

 # re-use cached folder to save ONNX models and TensorRT Engines
 pipe.set_cached_folder("stabilityai/stable-diffusion-2-1", variant='fp16',)

@@ -2231,12 +2230,12 @@ from io import BytesIO
 from PIL import Image
 import torch
 from diffusers import PNDMScheduler
-from diffusers.pipelines
+from diffusers.pipelines import DiffusionPipeline

 # Use the PNDMScheduler scheduler here instead
 scheduler = PNDMScheduler.from_pretrained("stabilityai/stable-diffusion-2-inpainting", subfolder="scheduler")

-pipe =
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting",
     custom_pipeline="stable_diffusion_tensorrt_inpaint",
     variant='fp16',
     torch_dtype=torch.float16,
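For orientation, a minimal usage sketch (not part of the diff): once the custom TensorRT text-to-image pipeline has been loaded and its cached folder set as in the updated README snippet, generation follows the usual diffusers call pattern. The prompt and output filename below are illustrative placeholders.

```python
# Usage sketch (not part of the upload): run the TensorRT-accelerated txt2img pipeline loaded above.
# A CUDA device is required for the TensorRT engines; the prompt and file name are placeholders.
pipe = pipe.to("cuda")
prompt = "a photograph of Mt. Fuji during cherry blossom season"  # illustrative prompt
image = pipe(prompt).images[0]
image.save("output.png")
```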
main/stable_diffusion_tensorrt_img2img.py
CHANGED
@@ -60,7 +60,7 @@ from diffusers.utils import logging
 """
 Installation instructions
 python3 -m pip install --upgrade transformers diffusers>=0.16.0
-python3 -m pip install --upgrade tensorrt
+python3 -m pip install --upgrade tensorrt~=10.2.0
 python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
 python3 -m pip install onnxruntime
 """

@@ -659,7 +659,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
     r"""
     Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion.

-    This model inherits from [`
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

     Args:
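Since all three scripts now pin TensorRT with `~=10.2.0` (a compatible-release specifier: at least 10.2.0 but below 10.3), a quick check of the installed version before building engines can save a long failed build. This is an optional sketch, not part of the upload.

```python
# Optional sanity check: confirm the installed TensorRT satisfies the ~=10.2.0 pin
# before spending up to 30 minutes on ONNX export and engine builds.
import tensorrt as trt
from packaging.version import Version

v = Version(trt.__version__)
assert Version("10.2.0") <= v < Version("10.3.0"), f"TensorRT {v} does not match the ~=10.2.0 requirement"
```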
main/stable_diffusion_tensorrt_inpaint.py
CHANGED
@@ -18,8 +18,7 @@
 import gc
 import os
 from collections import OrderedDict
-from
-from typing import List, Optional, Union
+from typing import List, Optional, Tuple, Union

 import numpy as np
 import onnx

@@ -27,9 +26,11 @@ import onnx_graphsurgeon as gs
 import PIL.Image
 import tensorrt as trt
 import torch
+from cuda import cudart
 from huggingface_hub import snapshot_download
 from huggingface_hub.utils import validate_hf_hub_args
 from onnx import shape_inference
+from packaging import version
 from polygraphy import cuda
 from polygraphy.backend.common import bytes_from_path
 from polygraphy.backend.onnx.loader import fold_constants

@@ -41,24 +42,29 @@ from polygraphy.backend.trt import (
     network_from_onnx_path,
     save_engine,
 )
-from polygraphy.backend.trt import util as trt_util
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

+from diffusers import DiffusionPipeline
+from diffusers.configuration_utils import FrozenDict, deprecate
+from diffusers.image_processor import VaeImageProcessor
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion import (
-    StableDiffusionInpaintPipeline,
     StableDiffusionPipelineOutput,
     StableDiffusionSafetyChecker,
 )
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import (
+    prepare_mask_and_masked_image,
+    retrieve_latents,
+)
 from diffusers.schedulers import DDIMScheduler
 from diffusers.utils import logging
+from diffusers.utils.torch_utils import randn_tensor


 """
 Installation instructions
 python3 -m pip install --upgrade transformers diffusers>=0.16.0
-python3 -m pip install --upgrade tensorrt
+python3 -m pip install --upgrade tensorrt~=10.2.0
 python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
 python3 -m pip install onnxruntime
 """

@@ -88,10 +94,6 @@ else:
 torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}


-def device_view(t):
-    return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
-
-
 def preprocess_image(image):
     """
     image: torch.Tensor

@@ -125,10 +127,8 @@ class Engine:
         onnx_path,
         fp16,
         input_profile=None,
-        enable_preview=False,
         enable_all_tactics=False,
         timing_cache=None,
-        workspace_size=0,
     ):
         logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
         p = Profile()

@@ -137,20 +137,13 @@ class Engine:
             assert len(dims) == 3
             p.add(name, min=dims[0], opt=dims[1], max=dims[2])

-        config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
-        if enable_preview:
-            # Faster dynamic shapes made optional since it increases engine build time.
-            config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
-        if workspace_size > 0:
-            config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
+        extra_build_args = {}
         if not enable_all_tactics:
+            extra_build_args["tactic_sources"] = []

         engine = engine_from_network(
             network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
-            config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **
+            config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **extra_build_args),
             save_timing_cache=timing_cache,
         )
         save_engine(engine, path=self.engine_path)

@@ -163,28 +156,24 @@ class Engine:
         self.context = self.engine.create_execution_context()

     def allocate_buffers(self, shape_dict=None, device="cuda"):
-        for
-        if shape_dict and
-            shape = shape_dict[
-        else:
-            shape = self.engine.
-        dtype = trt.nptype(self.engine.
-        if self.engine.
-            self.context.
-        tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
-        self.tensors[
-        self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
+        for binding in range(self.engine.num_io_tensors):
+            name = self.engine.get_tensor_name(binding)
+            if shape_dict and name in shape_dict:
+                shape = shape_dict[name]
+            else:
+                shape = self.engine.get_tensor_shape(name)
+            dtype = trt.nptype(self.engine.get_tensor_dtype(name))
+            if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
+                self.context.set_input_shape(name, shape)
+            tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
+            self.tensors[name] = tensor

     def infer(self, feed_dict, stream):
-        start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
-        # shallow copy of ordered dict
-        device_buffers = copy(self.buffers)
-        for name, buf in feed_dict.items():
-        noerror = self.context.
+        for name, buf in feed_dict.items():
+            self.tensors[name].copy_(buf)
+        for name, tensor in self.tensors.items():
+            self.context.set_tensor_address(name, tensor.data_ptr())
+        noerror = self.context.execute_async_v3(stream)
         if not noerror:
             raise ValueError("ERROR: inference failed.")

@@ -325,10 +314,8 @@ def build_engines(
     force_engine_rebuild=False,
     static_batch=False,
     static_shape=True,
-    enable_preview=False,
     enable_all_tactics=False,
     timing_cache=None,
-    max_workspace_size=0,
 ):
     built_engines = {}
     if not os.path.isdir(onnx_dir):

@@ -393,9 +380,7 @@ def build_engines(
                 static_batch=static_batch,
                 static_shape=static_shape,
             ),
-            enable_preview=enable_preview,
             timing_cache=timing_cache,
-            workspace_size=max_workspace_size,
         )
         built_engines[model_name] = engine
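The rewritten `Engine.allocate_buffers` and `Engine.infer` above move from the removed binding-index and `cuda.DeviceView` helpers to TensorRT 10's name-based tensor API. The sketch below is illustrative rather than part of the upload and shows that enumeration pattern in isolation against an already-deserialized engine.

```python
# Illustrative only: enumerate the I/O tensors of a deserialized TensorRT 10 engine the same way
# the updated allocate_buffers() does. `engine` is assumed to be a trt.ICudaEngine instance.
import tensorrt as trt


def describe_io_tensors(engine: trt.ICudaEngine):
    """Yield (name, mode, shape, dtype) for each engine I/O tensor."""
    for i in range(engine.num_io_tensors):
        name = engine.get_tensor_name(i)
        yield (
            name,
            engine.get_tensor_mode(name),   # trt.TensorIOMode.INPUT or .OUTPUT
            engine.get_tensor_shape(name),
            engine.get_tensor_dtype(name),
        )
```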
@@ -674,11 +659,11 @@ def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False)
     return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)


-class TensorRTStableDiffusionInpaintPipeline(
+class TensorRTStableDiffusionInpaintPipeline(DiffusionPipeline):
     r"""
     Pipeline for inpainting using TensorRT accelerated Stable Diffusion.

-    This model inherits from [`
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

     Args:

@@ -702,6 +687,8 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """

+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
+
     def __init__(
         self,
         vae: AutoencoderKL,

@@ -722,24 +709,86 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         onnx_dir: str = "onnx",
         # TensorRT engine build parameters
         engine_dir: str = "engine",
-        build_preview_features: bool = True,
         force_engine_rebuild: bool = False,
         timing_cache: str = "timing_cache",
     ):
-        super().__init__(
+        super().__init__()
+
+        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
+                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+                " file"
+            )
+            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["steps_offset"] = 1
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
+                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
+                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
+                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
+                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
+            )
+            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["clip_sample"] = False
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if safety_checker is None and requires_safety_checker:
+            logger.warning(
+                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+            )
+
+        if safety_checker is not None and feature_extractor is None:
+            raise ValueError(
+                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+            )
+
+        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
+            version.parse(unet.config._diffusers_version).base_version
+        ) < version.parse("0.9.0.dev0")
+        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
+        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
+            deprecation_message = (
+                "The configuration file of the unet has set the default `sample_size` to smaller than"
+                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
+                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
+                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
+                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
+                " in the config might lead to incorrect results in future versions. If you have downloaded this"
+                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
+                " the `unet/config.json` file"
+            )
+            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(unet.config)
+            new_config["sample_size"] = 64
+            unet._internal_dict = FrozenDict(new_config)
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            unet=unet,
+            scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
             image_encoder=image_encoder,
-            requires_safety_checker=requires_safety_checker,
         )

-        self.vae.forward = self.vae.decode
-
         self.stages = stages
         self.image_height, self.image_width = image_height, image_width
         self.inpaint = True

@@ -750,7 +799,6 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         self.timing_cache = timing_cache
         self.build_static_batch = False
         self.build_dynamic_shape = False
-        self.build_preview_features = build_preview_features

         self.max_batch_size = max_batch_size
         # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.

@@ -761,6 +809,11 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         self.models = {}  # loaded in __loadModels()
         self.engine = {}  # loaded in build_engines()

+        self.vae.forward = self.vae.decode
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
     def __loadModels(self):
         # Load pipeline models
         self.embedding_dim = self.text_encoder.config.hidden_size

@@ -779,6 +832,112 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         if "vae_encoder" in self.stages:
             self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)

+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline
+    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
+        if isinstance(generator, list):
+            image_latents = [
+                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
+                for i in range(image.shape[0])
+            ]
+            image_latents = torch.cat(image_latents, dim=0)
+        else:
+            image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
+
+        image_latents = self.vae.config.scaling_factor * image_latents
+
+        return image_latents
+
+    def prepare_latents(
+        self,
+        batch_size,
+        num_channels_latents,
+        height,
+        width,
+        dtype,
+        device,
+        generator,
+        latents=None,
+        image=None,
+        timestep=None,
+        is_strength_max=True,
+        return_noise=False,
+        return_image_latents=False,
+    ):
+        shape = (
+            batch_size,
+            num_channels_latents,
+            int(height) // self.vae_scale_factor,
+            int(width) // self.vae_scale_factor,
+        )
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        if (image is None or timestep is None) and not is_strength_max:
+            raise ValueError(
+                "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
+                "However, either the image or the noise timestep has not been provided."
+            )
+
+        if return_image_latents or (latents is None and not is_strength_max):
+            image = image.to(device=device, dtype=dtype)
+
+            if image.shape[1] == 4:
+                image_latents = image
+            else:
+                image_latents = self._encode_vae_image(image=image, generator=generator)
+            image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
+
+        if latents is None:
+            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            # if strength is 1. then initialise the latents to noise, else initial to image + noise
+            latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
+            # if pure noise then scale the initial latents by the Scheduler's init sigma
+            latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
+        else:
+            noise = latents.to(device)
+            latents = noise * self.scheduler.init_noise_sigma
+
+        outputs = (latents,)
+
+        if return_noise:
+            outputs += (noise,)
+
+        if return_image_latents:
+            outputs += (image_latents,)
+
+        return outputs
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+    def run_safety_checker(
+        self, image: Union[torch.Tensor, PIL.Image.Image], device: torch.device, dtype: torch.dtype
+    ) -> Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]:
+        r"""
+        Runs the safety checker on the given image.
+        Args:
+            image (Union[torch.Tensor, PIL.Image.Image]): The input image to be checked.
+            device (torch.device): The device to run the safety checker on.
+            dtype (torch.dtype): The data type of the input image.
+        Returns:
+            (image, has_nsfw_concept) Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: A tuple containing the processed image and
+            a boolean indicating whether the image has a NSFW (Not Safe for Work) concept.
+        """
+        if self.safety_checker is None:
+            has_nsfw_concept = None
+        else:
+            if torch.is_tensor(image):
+                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
+            else:
+                feature_extractor_input = self.image_processor.numpy_to_pil(image)
+            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
+            image, has_nsfw_concept = self.safety_checker(
+                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+            )
+        return image, has_nsfw_concept
+
     @classmethod
     @validate_hf_hub_args
     def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):

@@ -826,7 +985,6 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
             force_engine_rebuild=self.force_engine_rebuild,
             static_batch=self.build_static_batch,
             static_shape=not self.build_dynamic_shape,
-            enable_preview=self.build_preview_features,
             timing_cache=self.timing_cache,
         )

@@ -850,9 +1008,7 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         return tuple(init_images)

     def __encode_image(self, init_image):
-        init_latents = runEngine(self.engine["vae_encoder"], {"images":
+        init_latents = runEngine(self.engine["vae_encoder"], {"images": init_image}, self.stream)["latent"]
         init_latents = 0.18215 * init_latents
         return init_latents

@@ -881,9 +1037,8 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
             .to(self.torch_device)
         )

-        text_input_ids_inp = device_view(text_input_ids)
         # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
-        text_embeddings = runEngine(self.engine["clip"], {"input_ids":
+        text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids}, self.stream)[
             "text_embeddings"
         ].clone()

@@ -899,8 +1054,7 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
             .input_ids.type(torch.int32)
             .to(self.torch_device)
         )
-
-        uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
+        uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids}, self.stream)[
             "text_embeddings"
         ]

@@ -924,18 +1078,15 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
             # Predict the noise residual
             timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep

-            sample_inp = device_view(latent_model_input)
-            timestep_inp = device_view(timestep_float)
-            embeddings_inp = device_view(text_embeddings)
             noise_pred = runEngine(
                 self.engine["unet"],
-                {"sample":
+                {"sample": latent_model_input, "timestep": timestep_float, "encoder_hidden_states": text_embeddings},
                 self.stream,
             )["latent"]

             # Perform guidance
             noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-            noise_pred = noise_pred_uncond + self.
+            noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond)

             latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample

@@ -943,12 +1094,12 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         return latents

     def __decode_latent(self, latents):
-        images = runEngine(self.engine["vae"], {"latent":
+        images = runEngine(self.engine["vae"], {"latent": latents}, self.stream)["images"]
        images = (images / 2 + 0.5).clamp(0, 1)
         return images.cpu().permute(0, 2, 3, 1).float().numpy()

     def __loadResources(self, image_height, image_width, batch_size):
-        self.stream =
+        self.stream = cudart.cudaStreamCreate()[1]

         # Allocate buffers for TensorRT engine bindings
         for model_name, obj in self.models.items():

@@ -1112,5 +1263,6 @@ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
         # VAE decode latent
         images = self.__decode_latent(latents)

+        images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype)
         images = self.numpy_to_pil(images)
-        return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=
+        return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
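For orientation, a minimal usage sketch of the updated inpainting pipeline, mirroring the README snippet earlier in this commit; the image and mask URLs are placeholders, and the call signature is assumed to follow the standard Stable Diffusion inpainting interface (prompt, image, mask_image).

```python
# Minimal usage sketch (mirrors the README snippet above; URLs and prompt are placeholders).
import torch
from diffusers import PNDMScheduler
from diffusers.pipelines import DiffusionPipeline
from diffusers.utils import load_image

scheduler = PNDMScheduler.from_pretrained("stabilityai/stable-diffusion-2-inpainting", subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting",
    custom_pipeline="stable_diffusion_tensorrt_inpaint",
    variant="fp16",
    torch_dtype=torch.float16,
    scheduler=scheduler,
).to("cuda")

init_image = load_image("https://example.com/photo.png")       # placeholder URL
mask_image = load_image("https://example.com/photo_mask.png")   # placeholder URL
result = pipe(prompt="a mecha robot sitting on a bench", image=init_image, mask_image=mask_image).images[0]
```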
main/stable_diffusion_tensorrt_txt2img.py
CHANGED
@@ -18,17 +18,19 @@
 import gc
 import os
 from collections import OrderedDict
-from
-from typing import List, Optional, Union
+from typing import List, Optional, Tuple, Union

 import numpy as np
 import onnx
 import onnx_graphsurgeon as gs
+import PIL.Image
 import tensorrt as trt
 import torch
+from cuda import cudart
 from huggingface_hub import snapshot_download
 from huggingface_hub.utils import validate_hf_hub_args
 from onnx import shape_inference
+from packaging import version
 from polygraphy import cuda
 from polygraphy.backend.common import bytes_from_path
 from polygraphy.backend.onnx.loader import fold_constants

@@ -40,23 +42,25 @@ from polygraphy.backend.trt import (
     network_from_onnx_path,
     save_engine,
 )
-from polygraphy.backend.trt import util as trt_util
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

+from diffusers import DiffusionPipeline
+from diffusers.configuration_utils import FrozenDict, deprecate
+from diffusers.image_processor import VaeImageProcessor
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion import (
-    StableDiffusionPipeline,
     StableDiffusionPipelineOutput,
     StableDiffusionSafetyChecker,
 )
 from diffusers.schedulers import DDIMScheduler
 from diffusers.utils import logging
+from diffusers.utils.torch_utils import randn_tensor


 """
 Installation instructions
 python3 -m pip install --upgrade transformers diffusers>=0.16.0
-python3 -m pip install --upgrade tensorrt
+python3 -m pip install --upgrade tensorrt~=10.2.0
 python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
 python3 -m pip install onnxruntime
 """

@@ -86,10 +90,6 @@ else:
 torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}


-def device_view(t):
-    return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
-
-
 class Engine:
     def __init__(self, engine_path):
         self.engine_path = engine_path

@@ -110,10 +110,8 @@ class Engine:
         onnx_path,
         fp16,
         input_profile=None,
-        enable_preview=False,
         enable_all_tactics=False,
         timing_cache=None,
-        workspace_size=0,
     ):
         logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
         p = Profile()

@@ -122,20 +120,13 @@ class Engine:
             assert len(dims) == 3
             p.add(name, min=dims[0], opt=dims[1], max=dims[2])

-        config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
-        if enable_preview:
-            # Faster dynamic shapes made optional since it increases engine build time.
-            config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
-        if workspace_size > 0:
-            config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
+        extra_build_args = {}
         if not enable_all_tactics:
+            extra_build_args["tactic_sources"] = []

         engine = engine_from_network(
             network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
-            config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **
+            config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **extra_build_args),
             save_timing_cache=timing_cache,
         )
         save_engine(engine, path=self.engine_path)

@@ -148,28 +139,24 @@ class Engine:
         self.context = self.engine.create_execution_context()

     def allocate_buffers(self, shape_dict=None, device="cuda"):
-        for
-        if shape_dict and
-            shape = shape_dict[
-        else:
-            shape = self.engine.
-        dtype = trt.nptype(self.engine.
-        if self.engine.
-            self.context.
-        tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
-        self.tensors[
-        self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
+        for binding in range(self.engine.num_io_tensors):
+            name = self.engine.get_tensor_name(binding)
+            if shape_dict and name in shape_dict:
+                shape = shape_dict[name]
+            else:
+                shape = self.engine.get_tensor_shape(name)
+            dtype = trt.nptype(self.engine.get_tensor_dtype(name))
+            if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
+                self.context.set_input_shape(name, shape)
+            tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
+            self.tensors[name] = tensor

     def infer(self, feed_dict, stream):
-        start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
-        # shallow copy of ordered dict
-        device_buffers = copy(self.buffers)
-        for name, buf in feed_dict.items():
-        noerror = self.context.
+        for name, buf in feed_dict.items():
+            self.tensors[name].copy_(buf)
+        for name, tensor in self.tensors.items():
+            self.context.set_tensor_address(name, tensor.data_ptr())
+        noerror = self.context.execute_async_v3(stream)
         if not noerror:
             raise ValueError("ERROR: inference failed.")

@@ -310,10 +297,8 @@ def build_engines(
     force_engine_rebuild=False,
     static_batch=False,
     static_shape=True,
-    enable_preview=False,
     enable_all_tactics=False,
     timing_cache=None,
-    max_workspace_size=0,
 ):
     built_engines = {}
     if not os.path.isdir(onnx_dir):

@@ -378,9 +363,7 @@ def build_engines(
                 static_batch=static_batch,
                 static_shape=static_shape,
             ),
-            enable_preview=enable_preview,
             timing_cache=timing_cache,
-            workspace_size=max_workspace_size,
         )
         built_engines[model_name] = engine
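As context for the Engine changes above, here is a hypothetical driver showing how the rewritten class is exercised by the pipelines: a CUDA stream comes from cuda-python's `cudart`, buffers are preallocated once per named I/O tensor, and `infer` copies the inputs into those tensors before launching `execute_async_v3`. The engine argument, tensor names, and shapes below are placeholders, not part of the upload.

```python
# Hypothetical driver for the rewritten Engine class; `engine` is assumed to be a built and
# activated Engine instance for the CLIP text encoder, and names/shapes are illustrative.
import torch
from cuda import cudart


def encode_ids_with_trt(engine, input_ids: torch.Tensor) -> torch.Tensor:
    """Run one inference through an Engine wrapper using the TensorRT 10 tensor API."""
    stream = cudart.cudaStreamCreate()[1]  # cudaStreamCreate returns (error_code, stream_handle)
    engine.allocate_buffers(
        shape_dict={"input_ids": tuple(input_ids.shape), "text_embeddings": (input_ids.shape[0], 77, 1024)},
        device="cuda",
    )
    engine.infer({"input_ids": input_ids.to("cuda", dtype=torch.int32)}, stream)
    # Outputs live in the preallocated tensors; clone because the buffer is reused on the next call.
    return engine.tensors["text_embeddings"].clone()
```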
@@ -588,11 +571,11 @@ def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
     return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)


-class TensorRTStableDiffusionPipeline(
+class TensorRTStableDiffusionPipeline(DiffusionPipeline):
     r"""
     Pipeline for text-to-image generation using TensorRT accelerated Stable Diffusion.

-    This model inherits from [`
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

     Args:

@@ -616,6 +599,8 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """

+    _optional_components = ["safety_checker", "feature_extractor"]
+
     def __init__(
         self,
         vae: AutoencoderKL,

@@ -632,28 +617,90 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
         image_width: int = 768,
         max_batch_size: int = 16,
         # ONNX export parameters
-        onnx_opset: int =
+        onnx_opset: int = 18,
         onnx_dir: str = "onnx",
         # TensorRT engine build parameters
         engine_dir: str = "engine",
-        build_preview_features: bool = True,
         force_engine_rebuild: bool = False,
         timing_cache: str = "timing_cache",
     ):
-        super().__init__(
+        super().__init__()
+
+        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
+                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+                " file"
+            )
+            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["steps_offset"] = 1
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
+                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
+                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
+                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
+                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
+            )
+            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["clip_sample"] = False
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if safety_checker is None and requires_safety_checker:
+            logger.warning(
+                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+            )
+
+        if safety_checker is not None and feature_extractor is None:
+            raise ValueError(
+                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+            )
+
+        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
+            version.parse(unet.config._diffusers_version).base_version
+        ) < version.parse("0.9.0.dev0")
+        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
+        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
+            deprecation_message = (
+                "The configuration file of the unet has set the default `sample_size` to smaller than"
+                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
+                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
+                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
+                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
+                " in the config might lead to incorrect results in future versions. If you have downloaded this"
+                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
+                " the `unet/config.json` file"
+            )
+            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(unet.config)
+            new_config["sample_size"] = 64
+            unet._internal_dict = FrozenDict(new_config)
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            unet=unet,
+            scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
             image_encoder=image_encoder,
-            requires_safety_checker=requires_safety_checker,
         )

-        self.vae.forward = self.vae.decode
-
         self.stages = stages
         self.image_height, self.image_width = image_height, image_width
         self.inpaint = False

@@ -664,7 +711,6 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
         self.timing_cache = timing_cache
         self.build_static_batch = False
         self.build_dynamic_shape = False
-        self.build_preview_features = build_preview_features

         self.max_batch_size = max_batch_size
         # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.

@@ -675,6 +721,11 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
         self.models = {}  # loaded in __loadModels()
         self.engine = {}  # loaded in build_engines()

+        self.vae.forward = self.vae.decode
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
     def __loadModels(self):
         # Load pipeline models
         self.embedding_dim = self.text_encoder.config.hidden_size

@@ -691,6 +742,75 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
         if "vae" in self.stages:
             self.models["vae"] = make_VAE(self.vae, **models_args)

+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+    def prepare_latents(
+        self,
+        batch_size: int,
+        num_channels_latents: int,
+        height: int,
+        width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+        generator: Union[torch.Generator, List[torch.Generator]],
+        latents: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
+        r"""
+        Prepare the latent vectors for diffusion.
+        Args:
+            batch_size (int): The number of samples in the batch.
+            num_channels_latents (int): The number of channels in the latent vectors.
+            height (int): The height of the latent vectors.
+            width (int): The width of the latent vectors.
+            dtype (torch.dtype): The data type of the latent vectors.
+            device (torch.device): The device to place the latent vectors on.
+            generator (Union[torch.Generator, List[torch.Generator]]): The generator(s) to use for random number generation.
+            latents (Optional[torch.Tensor]): The pre-existing latent vectors. If None, new latent vectors will be generated.
+        Returns:
+            torch.Tensor: The prepared latent vectors.
+        """
+        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        if latents is None:
+            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+        else:
+            latents = latents.to(device)
+
+        # scale the initial noise by the standard deviation required by the scheduler
+        latents = latents * self.scheduler.init_noise_sigma
+        return latents
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+    def run_safety_checker(
+        self, image: Union[torch.Tensor, PIL.Image.Image], device: torch.device, dtype: torch.dtype
+    ) -> Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]:
+        r"""
+        Runs the safety checker on the given image.
+        Args:
+            image (Union[torch.Tensor, PIL.Image.Image]): The input image to be checked.
+            device (torch.device): The device to run the safety checker on.
+            dtype (torch.dtype): The data type of the input image.
+        Returns:
+            (image, has_nsfw_concept) Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: A tuple containing the processed image and
+            a boolean indicating whether the image has a NSFW (Not Safe for Work) concept.
+        """
+        if self.safety_checker is None:
+            has_nsfw_concept = None
+        else:
+            if torch.is_tensor(image):
+                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
+            else:
+                feature_extractor_input = self.image_processor.numpy_to_pil(image)
+            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
+            image, has_nsfw_concept = self.safety_checker(
+                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+            )
+        return image, has_nsfw_concept
+
     @classmethod
     @validate_hf_hub_args
     def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):

@@ -738,7 +858,6 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
             force_engine_rebuild=self.force_engine_rebuild,
             static_batch=self.build_static_batch,
             static_shape=not self.build_dynamic_shape,
-            enable_preview=self.build_preview_features,
             timing_cache=self.timing_cache,
         )

@@ -769,9 +888,8 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
             .to(self.torch_device)
         )

-        text_input_ids_inp = device_view(text_input_ids)
         # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
-        text_embeddings = runEngine(self.engine["clip"], {"input_ids":
+        text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids}, self.stream)[
             "text_embeddings"
         ].clone()

@@ -787,8 +905,7 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
             .input_ids.type(torch.int32)
             .to(self.torch_device)
         )
-
-        uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
+        uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids}, self.stream)[
             "text_embeddings"
         ]

@@ -812,18 +929,15 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
             # Predict the noise residual
             timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep

-            sample_inp = device_view(latent_model_input)
-            timestep_inp = device_view(timestep_float)
-            embeddings_inp = device_view(text_embeddings)
             noise_pred = runEngine(
                 self.engine["unet"],
-                {"sample":
+                {"sample": latent_model_input, "timestep": timestep_float, "encoder_hidden_states": text_embeddings},
                 self.stream,
             )["latent"]

             # Perform guidance
             noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-            noise_pred = noise_pred_uncond + self.
+            noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond)

             latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample

@@ -831,12 +945,12 @@ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
         return latents

     def __decode_latent(self, latents):
-        images = runEngine(self.engine["vae"], {"latent":
+        images = runEngine(self.engine["vae"], {"latent": latents}, self.stream)["images"]
         images = (images / 2 + 0.5).clamp(0, 1)
         return images.cpu().permute(0, 2, 3, 1).float().numpy()

     def __loadResources(self, image_height, image_width, batch_size):
-        self.stream =
+        self.stream = cudart.cudaStreamCreate()[1]

         # Allocate buffers for TensorRT engine bindings
         for model_name, obj in self.models.items():