Update app.py
app.py CHANGED
@@ -65,37 +65,27 @@ def generate(prompt, negative_prompt, width=1024, height=1024, num_inference_ste
     # pipe.unload_lora_weights()
     # pipe.load_lora_weights(lora_id.strip())
 
-
-
-    active_adapters = []
-    adapter_weights = []
+    clean_lora_id = lora_id.strip() if lora_id else ""
 
-
-
-
-
+    try:
+        # --- IF LORA ID IS NONE, USE ONE LORA ---
+        if not clean_lora_id:
+            print("No custom LoRA provided. Using ONLY the base LoRA.")
+            # Activate just the default LoRA
+            pipe.set_adapters([DEFAULT_LORA_NAME], adapter_weights=[1.0])
+
+        # --- OTHERWISE, LOAD AND USE TWO LORAS ---
+        else:
+            print(f"Custom LoRA provided. Loading '{clean_lora_id}' and combining with base LoRA.")
+            # Load the custom LoRA fresh for this run
+            pipe.load_lora_weights(clean_lora_id, adapter_name=CUSTOM_LORA_NAME)
+
+            # Activate BOTH LoRAs together
+            pipe.set_adapters(
+                [DEFAULT_LORA_NAME, CUSTOM_LORA_NAME],
+                adapter_weights=[1.0, 1.0]  # Strength for base, strength for custom
+            )
 
-    # Handle the user-provided custom LoRA
-    clean_lora_id = lora_id.strip() if lora_id else ""
-    if clean_lora_id:
-        try:
-            # If the requested LoRA is different from the one in memory, swap it
-            if clean_lora_id != CURRENTLY_LOADED_CUSTOM_LORA:
-                print(f"Switching custom LoRA to: {clean_lora_id}")
-                # Unload the old custom LoRA to save memory
-                if CURRENTLY_LOADED_CUSTOM_LORA is not None:
-                    pipe.unload_lora_weights(CUSTOM_LORA_NAME)
-
-                # Load the new one with its unique name
-                pipe.load_lora_weights(clean_lora_id, adapter_name=CUSTOM_LORA_NAME)
-                CURRENTLY_LOADED_CUSTOM_LORA = clean_lora_id
-
-            # Add the custom LoRA to the active list for this generation
-            active_adapters.append(CUSTOM_LORA_NAME)
-            adapter_weights.append(1.0)  # Strength for custom LoRA
-
-        except Exception as e:
-            print(f"⚠️ Failed to load custom LoRA '{clean_lora_id}'. Error: {e}")
 
     pipe.to("cuda")
     # apply_first_block_cache(pipe.transformer, FirstBlockCacheConfig(threshold=0.2))
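For context, below is a minimal, self-contained sketch of the adapter-selection logic this commit introduces, assuming a diffusers pipeline whose base LoRA is already loaded under the adapter name DEFAULT_LORA_NAME. The constant values, the function name select_adapters, and the except fallback are illustrative assumptions rather than the Space's exact code; the fallback is included because the added hunk's try: has no matching except within the shown context, while the previous revision did handle load failures.

# Minimal sketch only -- assumes `pipe` is a diffusers pipeline whose base LoRA
# was already loaded with adapter_name=DEFAULT_LORA_NAME. The constant values,
# the function name, and the except fallback are illustrative assumptions.

DEFAULT_LORA_NAME = "default"   # assumed adapter name for the Space's base LoRA
CUSTOM_LORA_NAME = "custom"     # assumed adapter name reused for user LoRAs


def select_adapters(pipe, lora_id):
    """Activate the base LoRA alone, or the base LoRA combined with a user LoRA."""
    clean_lora_id = lora_id.strip() if lora_id else ""
    try:
        if not clean_lora_id:
            # No custom LoRA requested: keep only the default adapter active.
            pipe.set_adapters([DEFAULT_LORA_NAME], adapter_weights=[1.0])
        else:
            # Load the user's LoRA into its own adapter slot, then enable both.
            # (Re-running with the same adapter_name may require removing the
            # previous adapter first, e.g. pipe.delete_adapters([CUSTOM_LORA_NAME]).)
            pipe.load_lora_weights(clean_lora_id, adapter_name=CUSTOM_LORA_NAME)
            pipe.set_adapters(
                [DEFAULT_LORA_NAME, CUSTOM_LORA_NAME],
                adapter_weights=[1.0, 1.0],  # base strength, custom strength
            )
    except Exception as e:
        # Fall back to the base LoRA if the custom one cannot be loaded.
        print(f"Failed to load custom LoRA '{clean_lora_id}': {e}")
        pipe.set_adapters([DEFAULT_LORA_NAME], adapter_weights=[1.0])

Called as select_adapters(pipe, lora_id) just before pipe.to("cuda") in generate(), this would reproduce the commit's single-vs-dual LoRA branching while keeping a failure fallback to the base adapter.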