Update app.py
app.py CHANGED
@@ -23,8 +23,7 @@ def get_lora_sd_pipeline(
     ckpt_dir='./model_output',
     base_model_name_or_path=model_id_default,
     dtype=torch_dtype,
-    device=device
-    adapter_name="pusheen"
+    device=device
 ):
     unet_sub_dir = os.path.join(ckpt_dir, "unet")
     text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
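The first hunk drops the hard-coded adapter_name="pusheen" parameter from the signature of get_lora_sd_pipeline; with no explicit name, peft registers the adapter under its default name ("default").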
@@ -36,11 +35,11 @@ def get_lora_sd_pipeline(
         raise ValueError("Please specify the base model name or path")
 
     pipe = StableDiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype).to(device)
-    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir
+    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir)
 
     if os.path.exists(text_encoder_sub_dir):
         pipe.text_encoder = PeftModel.from_pretrained(
-            pipe.text_encoder, text_encoder_sub_dir
+            pipe.text_encoder, text_encoder_sub_dir
         )
 
     if dtype in (torch.float16, torch.bfloat16):
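The second hunk matches the signature change: both PeftModel.from_pretrained calls now take just the module and its checkpoint subdirectory, and the unet call regains its closing parenthesis. A minimal standalone sketch of the resulting loading pattern, assuming a checkpoint directory containing unet/ and text_encoder/ subfolders (the base-model id, paths, and device handling below are illustrative placeholders, not values from this Space):

import os

import torch
from diffusers import StableDiffusionPipeline
from peft import PeftModel

ckpt_dir = "./model_output"  # assumed layout: unet/ and text_encoder/ inside
base_model = "runwayml/stable-diffusion-v1-5"  # placeholder base model id
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)

# Wrap the UNet with its trained LoRA adapter.
pipe.unet = PeftModel.from_pretrained(pipe.unet, os.path.join(ckpt_dir, "unet"))

# The text-encoder adapter is optional; attach it only if it was trained.
te_dir = os.path.join(ckpt_dir, "text_encoder")
if os.path.exists(te_dir):
    pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, te_dir)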
@@ -93,7 +92,7 @@ def infer(
     generator = torch.Generator(device).manual_seed(seed)
     pipe = get_lora_sd_pipeline(base_model_name_or_path=model_id)
     pipe = pipe.to(device)
-    pipe.fuse_lora(lora_scale=lora_scale)
+    # pipe.fuse_lora(lora_scale=lora_scale)
     pipe.safety_checker = None
     # prompt_embeds = encode_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
     # negative_prompt_embeds = encode_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
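The last hunk comments out pipe.fuse_lora(lora_scale=lora_scale), presumably because fuse_lora belongs to diffusers' own LoRA path (adapters loaded via load_lora_weights), whereas here the UNet and text encoder are wrapped in PeftModel by hand; the adapters therefore run unfused at their trained scale. If fusing is wanted for inference speed, peft's merge_and_unload is one option; a sketch under the same assumptions as the snippet above:

# Bake the LoRA weights into the base modules (a peft API on LoRA-wrapped models).
# Note: merging uses the adapter's trained scaling, not an arbitrary lora_scale.
pipe.unet = pipe.unet.merge_and_unload()
if isinstance(pipe.text_encoder, PeftModel):
    pipe.text_encoder = pipe.text_encoder.merge_and_unload()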