ControlNet

This example shows how to use LCM-LoRA with a Canny ControlNet and SD v1.5. It assumes `canny_image` is a Canny edge map prepared the same way as in the T2I-Adapter example below; the checkpoint names are the standard SD v1.5 Canny pairing.

import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# load the Canny ControlNet and the SD v1.5 base pipeline
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
    variant="fp16"
).to("cuda")
# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

generator = torch.manual_seed(0)
image = pipe(
    "the mona lisa",
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    controlnet_conditioning_scale=0.8,
    cross_attention_kwargs={"scale": 1},
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)

The inference parameters in this example might not work for all inputs, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the ones that work best; a small sweep like the one sketched below can help narrow them down.
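A minimal sketch of such a sweep, reusing `pipe` and `canny_image` from the example above; the value grids are illustrative, not tuned:

# sweep guidance_scale and controlnet_conditioning_scale over small grids,
# keeping the seed fixed so only the parameters vary between images
results = []
for gs in [1.0, 1.5, 2.0]:        # illustrative guidance_scale values
    for ccs in [0.6, 0.8, 1.0]:   # illustrative controlnet_conditioning_scale values
        generator = torch.manual_seed(0)
        results.append(pipe(
            "the mona lisa",
            image=canny_image,
            num_inference_steps=4,
            guidance_scale=gs,
            controlnet_conditioning_scale=ccs,
            cross_attention_kwargs={"scale": 1},
            generator=generator,
        ).images[0])
make_image_grid(results, rows=3, cols=3)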
T2I-Adapter

This example shows how to use LCM-LoRA with the Canny T2I-Adapter and SDXL.

import torch
import cv2
import numpy as np
from PIL import Image
from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# Prepare image
# Detect the Canny map at low resolution to avoid high-frequency details
image = load_image(
    "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg"
).resize((384, 384))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image).resize((1024, 1024))
# load adapter
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    adapter=adapter,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "Mystical fairy in real, magic, 4k picture, high quality"
negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    adapter_conditioning_scale=0.8,
    adapter_conditioning_factor=1,
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)

Here `adapter_conditioning_scale` controls how strongly the adapter guidance is applied, and `adapter_conditioning_factor` sets the fraction of denoising steps during which it is applied; lowering either relaxes the Canny constraint, as sketched below.
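A minimal sketch, reusing `pipe`, `prompt`, `negative_prompt`, and `canny_image` from the example above; the 0.6 and 0.5 values are illustrative:

# apply weaker adapter guidance, and only for the first half of the steps
generator = torch.manual_seed(0)
relaxed_image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    adapter_conditioning_scale=0.6,   # illustrative: weaker adapter guidance
    adapter_conditioning_factor=0.5,  # illustrative: adapter active for the first 50% of steps
    generator=generator,
).images[0]
make_image_grid([image, relaxed_image], rows=1, cols=2)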
Inpainting

LCM-LoRA can be used for inpainting as well.

import torch
from diffusers import AutoPipelineForInpainting, LCMScheduler
from diffusers.utils import load_image, make_image_grid

pipe = AutoPipelineForInpainting.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

# load base and mask image
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png")
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png")

prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k"

generator = torch.manual_seed(0)
# generate (step count and guidance follow the 4-step LCM-LoRA recipe used above)
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    generator=generator,
).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
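Once the LCM-LoRA is loaded, you can optionally fuse the LoRA weights into the model to remove the small per-step LoRA overhead. A minimal sketch using Diffusers' `fuse_lora`/`unfuse_lora` helpers, reusing the inpainting pipeline and inputs from the example above:

# optional: fuse the LCM-LoRA weights into the model for slightly faster inference
pipe.fuse_lora()
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    generator=torch.manual_seed(0),
).images[0]
# revert to the unfused weights before unloading or swapping the LoRA
pipe.unfuse_lora()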