Update app.py
app.py CHANGED
@@ -52,20 +52,18 @@ def face_bbox_to_square(bbox):
    return [l0, t0, r0, b0]


-
-
-
-
-
-
-
-
-control_path = f'weights/Kolors-Controlnet-Pose-Tryon'
+text_encoder = ChatGLMModel.from_pretrained("Kwai-Kolors/Kolors", subfolder="text_encoder").to(dtype=torch.bfloat16)
+tokenizer = ChatGLMTokenizer.from_pretrained("Kwai-Kolors/Kolors", subfolder="text_encoder")
+vae = AutoencoderKL.from_pretrained("Kwai-Kolors/Kolors", subfolder="vae", revision=None).to(dtype=torch.bfloat16)
+scheduler = EulerDiscreteScheduler.from_pretrained("Kwai-Kolors/Kolors", subfolder="scheduler")
+unet = UNet2DConditionModel.from_pretrained("Kwai-Kolors/Kolors", subfolder="unet", revision=None).to(dtype=torch.bfloat16)
+
+control_path = "haowu11/Kolors-Controlnet-Pose-Tryon"
controlnet = ControlNetModel.from_pretrained(control_path, revision=None).to(dtype=torch.bfloat16)

face_info_generator = FaceInfoGenerator(root_dir="./")

-clip_image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+clip_image_encoder = CLIPVisionModelWithProjection.from_pretrained("Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus", subfolder="clip-vit-large-patch14-336", ignore_mismatched_sizes=True)
clip_image_encoder.to('cuda')
clip_image_processor = CLIPImageProcessor(size=336, crop_size=336)

@@ -85,7 +83,7 @@ pipe = StableDiffusionXLControlNetImg2ImgPipeline(
if hasattr(pipe.unet, 'encoder_hid_proj'):
    pipe.unet.text_encoder_hid_proj = pipe.unet.encoder_hid_proj
ip_scale = 0.5
-pipe.load_ip_adapter_faceid_plus(f'
+pipe.load_ip_adapter_faceid_plus('ipa-faceid-plus.bin', device='cuda')
pipe.set_face_fidelity_scale(ip_scale)
pipe = pipe.to("cuda")
pipe.enable_model_cpu_offload()

@@ -205,5 +203,5 @@ with block:
    run_button = gr.Button(value="Run")
    ips = [face_img, pose_img, garm_img, prompt, negative_prompt, n_samples, n_steps, seed]
    run_button.click(fn=infer, inputs=ips, outputs=[result_gallery])
-
-block.launch(server_name='0.0.0.0'
+if __name__ == "__main__":
+    block.launch(server_name='0.0.0.0')
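As context for the second hunk (whose header shows pipe = StableDiffusionXLControlNetImg2ImgPipeline(), the components loaded above are presumably passed into that constructor. The sketch below is a minimal illustration of that wiring only; the keyword names, the image_encoder/feature_extractor arguments, and the force_zeros_for_empty_prompt flag are assumptions about the custom Kolors pipeline and are not shown in this diff.

# Sketch only: constructor keywords below are assumed, not taken from this diff.
pipe = StableDiffusionXLControlNetImg2ImgPipeline(
    vae=vae,                                 # AutoencoderKL from Kwai-Kolors/Kolors
    controlnet=controlnet,                   # Kolors-Controlnet-Pose-Tryon weights
    text_encoder=text_encoder,               # ChatGLMModel text encoder
    tokenizer=tokenizer,                     # ChatGLMTokenizer
    unet=unet,                               # UNet2DConditionModel
    scheduler=scheduler,                     # EulerDiscreteScheduler
    image_encoder=clip_image_encoder,        # assumed keyword for the CLIP vision tower
    feature_extractor=clip_image_processor,  # assumed keyword for the CLIPImageProcessor
    force_zeros_for_empty_prompt=False,      # assumed; common default in Kolors pipelines
)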
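A note on the adapter weights: pipe.load_ip_adapter_faceid_plus('ipa-faceid-plus.bin', device='cuda') points at a bare local filename. If that file is not checked into the Space, one way to obtain it is to download it from the Hub first; the sketch below assumes the weights live in the same Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus repository used for the CLIP image encoder, which this diff does not confirm.

from huggingface_hub import hf_hub_download

# Assumption: the FaceID-Plus adapter file is hosted in the same repo as the CLIP image encoder.
ipa_path = hf_hub_download(
    repo_id="Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus",
    filename="ipa-faceid-plus.bin",
)
# hf_hub_download returns the local cached path, which can be passed straight to the loader.
pipe.load_ip_adapter_faceid_plus(ipa_path, device="cuda")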