ktrndy committed on
Commit a473611 · verified · 1 Parent(s): 2ca97fb

Update app.py

Files changed (1):
  1. app.py  +2 -3
app.py CHANGED
@@ -24,8 +24,7 @@ def get_lora_sd_pipeline(
     base_model_name_or_path=model_id_default,
     dtype=torch_dtype,
     device=device,
-    adapter_name="default",
-    lora_scale=1.0
+    adapter_name="default"
 ):
     unet_sub_dir = os.path.join(ckpt_dir, "unet")
     text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
@@ -39,7 +38,6 @@ def get_lora_sd_pipeline(
     pipe = StableDiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype).to(device)
     pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
     pipe.unet.set_adapter(adapter_name)
-    pipe.fuse_lora(lora_scale=lora_scale)

     if os.path.exists(text_encoder_sub_dir):
         pipe.text_encoder = PeftModel.from_pretrained(
@@ -97,6 +95,7 @@ def infer(
     pipe = get_lora_sd_pipeline(base_model_name_or_path=model_id,
                                 adapter_name="sticker_of_funny_cat_Pusheen")
     pipe = pipe.to(device)
+    pipe.fuse_lora(lora_scale=lora_scale)
     # prompt_embeds = encode_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
     # negative_prompt_embeds = encode_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)

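For context, here is a minimal sketch of how the loader and the infer() call site fit together after this commit, reconstructed only from the hunks above: fuse_lora() moves out of get_lora_sd_pipeline() into infer(), so the LoRA scale is chosen per inference call instead of being baked in when the pipeline is built. The module-level defaults (model_id_default, torch_dtype, device), the ckpt_dir default, infer()'s signature, and the lines elided between hunks are assumptions for illustration, not contents of the actual file.

# Sketch of the post-commit structure; placeholders are assumptions, not the real app.py.
import os

import torch
from diffusers import StableDiffusionPipeline
from peft import PeftModel

model_id_default = "runwayml/stable-diffusion-v1-5"      # assumed default base model
torch_dtype = torch.float16                              # assumed default dtype
device = "cuda" if torch.cuda.is_available() else "cpu"  # assumed device selection


def get_lora_sd_pipeline(
    ckpt_dir="./lora_ckpt",                    # assumed default; not visible in the diff
    base_model_name_or_path=model_id_default,
    dtype=torch_dtype,
    device=device,
    adapter_name="default",                    # lora_scale is no longer a parameter here
):
    unet_sub_dir = os.path.join(ckpt_dir, "unet")
    text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")

    # ... (lines between the two hunks are not shown in the diff)

    pipe = StableDiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype).to(device)
    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
    pipe.unet.set_adapter(adapter_name)
    # fuse_lora() is no longer called here, so the adapter stays unfused until inference

    if os.path.exists(text_encoder_sub_dir):
        pipe.text_encoder = PeftModel.from_pretrained(
            pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
        )
    return pipe


def infer(prompt, model_id=model_id_default, lora_scale=1.0):  # signature assumed
    pipe = get_lora_sd_pipeline(base_model_name_or_path=model_id,
                                adapter_name="sticker_of_funny_cat_Pusheen")
    pipe = pipe.to(device)
    pipe.fuse_lora(lora_scale=lora_scale)  # fusing now happens per call, with the caller's scale
    # ... (rest of infer() is not visible in the diff)
    return pipe(prompt).images[0]          # assumed return; actual body not shown

The practical effect is that a single cached pipeline object no longer has a fixed, pre-fused LoRA scale; each infer() call can apply its own lora_scale when fusing.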