Commit 8a6d38a (parent: 5ae33c3) · Update app.py

app.py CHANGED
@@ -164,10 +164,13 @@ def run_lora(prompt, negative, lora_scale, selected_state, sdxl_loras, sdxl_lora
 
     full_path_lora = state_dicts[repo_name]["saved_name"]
     loaded_state_dict = copy.deepcopy(state_dicts[repo_name]["state_dict"])
-    cross_attention_kwargs = None
+    #cross_attention_kwargs = None
     if last_lora != repo_name:
+        #if(last_fused):
+            #pipe.unfuse_lora()
         pipe.load_lora_weights(loaded_state_dict, adapter_name=sdxl_loras[selected_state.index]["repo"])
-        pipe.set_adapters([sdxl_loras[selected_state.index]["repo"], "lcm_lora"], adapter_weights=[
+        pipe.set_adapters([sdxl_loras[selected_state.index]["repo"], "lcm_lora"], adapter_weights=[lora_scale, 1.0])
+        #last_fused = True
         is_pivotal = sdxl_loras[selected_state.index]["is_pivotal"]
         if(is_pivotal):
             #Add the textual inversion embeddings from pivotal tuning models

@@ -182,7 +185,8 @@ def run_lora(prompt, negative, lora_scale, selected_state, sdxl_loras, sdxl_lora
         prompt=prompt,
         negative_prompt=negative,
         num_inference_steps=4,
-        guidance_scale=0.5
+        guidance_scale=0.5
+
     ).images[0]
 
     last_lora = repo_name

@@ -207,7 +211,8 @@ with gr.Blocks(css="custom.css") as demo:
     gr_sdxl_loras = gr.State(value=sdxl_loras_raw)
     gr_sdxl_loras_new = gr.State(value=sdxl_loras_raw_new)
     title = gr.HTML(
-        """<h1><img src="https://i.imgur.com/vT48NAO.png" alt="LoRA">LCM LoRA the Explorer</h1>
+        """<h1><img src="https://i.imgur.com/vT48NAO.png" alt="LoRA"><i>Experimental</i> LCM LoRA the Explorer</h1>
+        <br>
         Combine loading an [LCM LoRA](#) with your favorite SDXL LoRa and run LoRAs in only 4 steps. Check out our blog to see how this works.
         """,
         elem_id="title",
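The change moves from a fused-LoRA flow (the commented-out unfuse_lora/last_fused bookkeeping) to diffusers' multi-adapter API: the selected SDXL LoRA and the LCM LoRA stay loaded as named adapters, are weighted together with set_adapters(), and the pipeline then samples in 4 steps at a very low guidance scale. Below is a minimal standalone sketch of that pattern outside the Space's state handling; the style-LoRA repo id, adapter names, prompt, and device/dtype choices are illustrative assumptions, not values taken from app.py.

import torch
from diffusers import DiffusionPipeline, LCMScheduler

# SDXL base pipeline; an LCM scheduler is required for few-step sampling
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# Load the LCM LoRA and a style LoRA as separate named adapters
# (the style repo id here is only an example)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm_lora")
pipe.load_lora_weights("nerijs/pixel-art-xl", adapter_name="pixel")

# Activate both adapters at once; the style weight plays the role of lora_scale
# in the Space, while the LCM LoRA is kept at full strength (1.0)
pipe.set_adapters(["pixel", "lcm_lora"], adapter_weights=[0.9, 1.0])

# 4 steps and guidance_scale=0.5, matching the values in the diff
image = pipe(
    prompt="pixel art, a cozy cabin in the woods",
    negative_prompt="blurry, low quality",
    num_inference_steps=4,
    guidance_scale=0.5,
).images[0]
image.save("lcm_lora_result.png")

Keeping both LoRAs as adapters instead of fusing means switching the selected style LoRA only requires another load_lora_weights/set_adapters call, without unfusing the previous weights from the UNet.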