import torch
import spaces
from diffusers import StableDiffusion3Pipeline
from huggingface_hub import login
import os
import gradio as gr
# Retrieve the Hugging Face token from the environment variable (a Space repository secret)
token = os.getenv("HF_TOKEN")
if token:
    login(token=token)  # Log in with the retrieved token
else:
    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")
# Load the Stable Diffusion 3.5 model, using lower precision (float16) when a GPU is available
model_id = "stabilityai/stable-diffusion-3.5-large"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipe = StableDiffusion3Pipeline.from_pretrained(model_id, torch_dtype=dtype)
# Move the pipeline to the GPU if one is available, otherwise keep it on the CPU
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
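# Optional (not part of the original app): on GPUs with limited VRAM, CPU offloading
# could be used instead of moving the whole pipeline to the GPU, e.g.:
# pipe.enable_model_cpu_offload()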
# Define the path to the LoRA model
lora_model_path = "./lora_model.pth" # Assuming the file is saved locally
# Custom helper to load LoRA weights and apply them to the Stable Diffusion pipeline
def load_lora_model(pipe, lora_model_path):
    # Load the LoRA weights onto the same device as the pipeline
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    lora_weights = torch.load(lora_model_path, map_location=device, weights_only=True)

    # Stable Diffusion 3 pipelines expose a `transformer` backbone rather than a `unet`;
    # fall back to `unet` for pipelines that still provide one.
    backbone = getattr(pipe, "transformer", None) or getattr(pipe, "unet", None)
    if backbone is None:
        print("The pipeline has neither a 'transformer' nor a 'unet' attribute. Please check the model structure.")
        return pipe

    # Add the stored LoRA deltas to the matching backbone parameters
    # (assumes the .pth file maps parameter names to weight-delta tensors)
    for name, param in backbone.named_parameters():
        if name in lora_weights:
            param.data += lora_weights[name].to(dtype=param.dtype)

    return pipe
# Load and apply the LoRA model weights
pipe = load_lora_model(pipe, lora_model_path)
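# Alternative sketch (assumes the LoRA were exported in the diffusers/PEFT safetensors
# layout rather than a raw state-dict .pth): the pipeline's built-in loader could then
# replace the manual merge above, e.g.:
# pipe.load_lora_weights("./lora_model.safetensors")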
# Use the @spaces.GPU decorator to request GPU hardware on Spaces when available
@spaces.GPU
def generate(prompt, seed=None):
    # Use a fixed seed for reproducible results when one is provided (Gradio passes it as a float)
    generator = torch.manual_seed(int(seed)) if seed is not None else None
    # Generate the image from the prompt
    image = pipe(prompt, height=512, width=512, generator=generator).images[0]
    return image
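# Note: the pipeline call above uses the default sampling settings; parameters such as
# num_inference_steps and guidance_scale could also be passed explicitly, e.g.:
# image = pipe(prompt, height=512, width=512, num_inference_steps=28,
#              guidance_scale=7.0, generator=generator).images[0]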
# Gradio interface
iface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Enter your prompt"),                    # Prompt text
        gr.Number(label="Enter a seed (optional)", value=None),   # Optional seed
    ],
    outputs="image",
)
iface.launch()