Update app.py
Browse files
app.py
CHANGED
@@ -5,30 +5,41 @@ import spaces
|
|
5 |
import random
|
6 |
from diffusers import StableDiffusion3Pipeline
|
7 |
from diffusers.loaders import SD3LoraLoaderMixin
|
|
|
8 |
|
9 |
# Device selection
|
10 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
11 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
12 |
|
13 |
-
# Load
|
14 |
token = os.getenv("HF_TOKEN")
|
15 |
|
16 |
# Model ID for SD 3.5 Large
|
17 |
model_repo_id = "stabilityai/stable-diffusion-3.5-large"
|
18 |
|
19 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
pipeline = StableDiffusion3Pipeline.from_pretrained(
|
21 |
model_repo_id,
|
22 |
torch_dtype=torch_dtype,
|
23 |
use_safetensors=True, # Use safetensors format if supported
|
24 |
).to(device)
|
25 |
|
26 |
-
# Load
|
27 |
-
|
28 |
-
if os.path.exists(lora_path):
|
29 |
try:
|
30 |
-
|
31 |
-
|
|
|
32 |
except Exception as e:
|
33 |
print(f"❌ Error loading LoRA: {e}")
|
34 |
else:
|
@@ -37,7 +48,7 @@ else:
|
|
37 |
# Verify if LoRA is applied
|
38 |
for name, param in pipeline.text_encoder.named_parameters():
|
39 |
if "lora" in name.lower():
|
40 |
-
print(f"LoRA applied to: {name}, requires_grad={param.requires_grad}")
|
41 |
|
42 |
# Ensure GPU allocation in Hugging Face Spaces
|
43 |
@spaces.GPU(duration=65)
|
|
|
5 |
import random
|
6 |
from diffusers import StableDiffusion3Pipeline
|
7 |
from diffusers.loaders import SD3LoraLoaderMixin
|
8 |
+
from safetensors.torch import load_file, save_file
|
9 |
|
10 |
# Device selection
|
11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
12 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
13 |
|
14 |
+
# Load Hugging Face token securely
|
15 |
token = os.getenv("HF_TOKEN")
|
16 |
|
17 |
# Model ID for SD 3.5 Large
|
18 |
model_repo_id = "stabilityai/stable-diffusion-3.5-large"
|
19 |
|
20 |
+
# Convert .pt to .safetensors if needed
|
21 |
+
lora_pt_path = "lora_trained_model.pt"
|
22 |
+
lora_safetensors_path = "lora_trained_model.safetensors"
|
23 |
+
|
24 |
+
if os.path.exists(lora_pt_path) and not os.path.exists(lora_safetensors_path):
|
25 |
+
print("🔄 Converting LoRA .pt to .safetensors...")
|
26 |
+
lora_weights = torch.load(lora_pt_path, map_location="cpu")
|
27 |
+
save_file(lora_weights, lora_safetensors_path)
|
28 |
+
print(f"✅ LoRA saved as {lora_safetensors_path}")
|
29 |
+
|
30 |
+
# Load Stable Diffusion pipeline
|
31 |
pipeline = StableDiffusion3Pipeline.from_pretrained(
|
32 |
model_repo_id,
|
33 |
torch_dtype=torch_dtype,
|
34 |
use_safetensors=True, # Use safetensors format if supported
|
35 |
).to(device)
|
36 |
|
37 |
+
# Load and fuse LoRA trained weights
|
38 |
+
if os.path.exists(lora_safetensors_path):
|
|
|
39 |
try:
|
40 |
+
pipeline.load_lora_weights(".", weight_name="lora_trained_model.safetensors") # Corrected loading method
|
41 |
+
pipeline.fuse_lora() # Merges LoRA into the base model
|
42 |
+
print("✅ LoRA weights loaded and fused successfully!")
|
43 |
except Exception as e:
|
44 |
print(f"❌ Error loading LoRA: {e}")
|
45 |
else:
|
|
|
48 |
# Verify if LoRA is applied
|
49 |
for name, param in pipeline.text_encoder.named_parameters():
|
50 |
if "lora" in name.lower():
|
51 |
+
print(f"✅ LoRA applied to: {name}, requires_grad={param.requires_grad}")
|
52 |
|
53 |
# Ensure GPU allocation in Hugging Face Spaces
|
54 |
@spaces.GPU(duration=65)
|