Update app.py
app.py CHANGED
@@ -54,7 +54,7 @@ def face_bbox_to_square(bbox):
 
 text_encoder = ChatGLMModel.from_pretrained("Kwai-Kolors/Kolors", subfolder="text_encoder").to(dtype=torch.bfloat16)
 tokenizer = ChatGLMTokenizer.from_pretrained("Kwai-Kolors/Kolors", subfolder="text_encoder")
-vae = AutoencoderKL.from_pretrained("Kwai-Kolors/Kolors", subfolder="vae"
+vae = AutoencoderKL.from_pretrained("Kwai-Kolors/Kolors", subfolder="vae", revision=None).to(dtype=torch.bfloat16)
 scheduler = EulerDiscreteScheduler.from_pretrained("Kwai-Kolors/Kolors", subfolder="scheduler")
 unet = UNet2DConditionModel.from_pretrained("Kwai-Kolors/Kolors", subfolder="unet", revision=None).to(dtype=torch.bfloat16)
 
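Note: the hunk above references classes that are imported earlier in app.py. A minimal sketch of the imports these lines assume is below; the diffusers names are the stock classes, while the module paths for the ChatGLM classes are an assumption (they ship with the Kolors code base, not stock transformers) and may differ from what the Space actually uses.

import torch
from diffusers import AutoencoderKL, EulerDiscreteScheduler, UNet2DConditionModel
# ChatGLMModel / ChatGLMTokenizer come from the Kolors code base; the
# module paths below are assumed, not taken from this commit.
from kolors.models.modeling_chatglm import ChatGLMModel
from kolors.models.tokenization_chatglm import ChatGLMTokenizer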
@@ -83,7 +83,7 @@ pipe = StableDiffusionXLControlNetImg2ImgPipeline(
 if hasattr(pipe.unet, 'encoder_hid_proj'):
     pipe.unet.text_encoder_hid_proj = pipe.unet.encoder_hid_proj
 ip_scale = 0.5
-pipe.load_ip_adapter_faceid_plus(
+pipe.load_ip_adapter_faceid_plus('ipa-faceid-plus.bin', device='cuda')
 pipe.set_face_fidelity_scale(ip_scale)
 pipe = pipe.to("cuda")
 pipe.enable_model_cpu_offload()
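For context on the new call: load_ip_adapter_faceid_plus is a method of this Space's custom Kolors pipeline, not stock diffusers, and it expects the ipa-faceid-plus.bin weight file to exist locally. A minimal sketch of fetching the weight from the Hub before loading it is below; the repo id is an assumption, only the filename comes from this diff.

from huggingface_hub import hf_hub_download

# Repo id below is assumed; the filename matches the one used in the diff.
ipa_path = hf_hub_download(
    repo_id="Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus",
    filename="ipa-faceid-plus.bin",
)
pipe.load_ip_adapter_faceid_plus(ipa_path, device='cuda')
pipe.set_face_fidelity_scale(0.5)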