import gc
import os
from typing import TypeAlias

import torch
import torch._dynamo
from diffusers import FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from torch import Generator
from torchao.quantization import int8_weight_only, quantize_

from pipelines.models import TextToImageRequest
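
# Fall back to eager execution instead of raising if torch.compile hits an unsupported graph.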
torch._dynamo.config.suppress_errors = True

Pipeline: TypeAlias = FluxPipeline
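
# cuDNN autotuning and torch.inductor tuning knobs for the compiled modules.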
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
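
# Expandable CUDA allocator segments reduce memory fragmentation during warm-up.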
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
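
# Hub checkpoint pinned to an exact revision for reproducible local loads.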
CHECKPOINT = "winner632/flux1-schnell-int8wo"
REVISION = "d9ff2fc9ad81476d3ef3a5f40d273f0fa5a36f2b"


def clear_gpu_cache():
    """Free cached GPU memory and reset the allocator's peak-memory statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    """Load the diffusion pipeline with int8 quantization, compile it, and warm it up."""
    clear_gpu_cache()
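
    # Load the int8 weight-only transformer from the pinned local snapshot;
    # the path is derived from CHECKPOINT and REVISION.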
    transformer_model = FluxTransformer2DModel.from_pretrained(
        os.path.join(
            HF_HUB_CACHE,
            f"models--{CHECKPOINT.replace('/', '--')}/snapshots/{REVISION}/transformer",
        ),
        use_safetensors=True,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )

    pipe = FluxPipeline.from_pretrained(
        CHECKPOINT,
        revision=REVISION,
        transformer=transformer_model,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    ).to("cuda")

    # Apply channels_last per sub-module; this mainly benefits the convolutional VAE.
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.vae.to(memory_format=torch.channels_last)

    # Quantize weights to int8 before compiling so inductor traces the quantized modules.
    quantize_(pipe.transformer, int8_weight_only())
    quantize_(pipe.vae, int8_weight_only())
    pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead")
    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead")
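
    # Warm-up passes trigger torch.compile (and CUDA graph capture under
    # "reduce-overhead") at the exact resolution used for inference.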
    with torch.no_grad():
        for _ in range(5):
            pipe(
                prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
                width=1024,
                height=1024,
                guidance_scale=0.0,
                num_inference_steps=4,
                max_sequence_length=256,
            )

    clear_gpu_cache()
    return pipe


@torch.no_grad()
def infer(
    request: TextToImageRequest, pipeline: Pipeline, generator: Generator
) -> Image:
    """Generate an image from a text-to-image request using the loaded pipeline."""
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]


if __name__ == "__main__":
    print("Loading pipeline...")
    diffusion_pipeline = load_pipeline()

    sample_request = TextToImageRequest(
        prompt="A futuristic cityscape with neon lights",
        height=1024,
        width=1024,
    )

    generator = torch.Generator(device="cuda").manual_seed(42)

    print("Generating image...")
    generated_img = infer(sample_request, diffusion_pipeline, generator)
    generated_img.show()