"""Hugging Face Space: Stable Diffusion 3.5 + custom LoRA, served via Gradio.

Logs in with the HF_TOKEN repository secret, loads the gated SD 3.5 Large
pipeline on CPU, merges locally stored LoRA deltas into the transformer,
and exposes a prompt -> image Gradio interface.
"""
import os

import torch
import gradio as gr
from diffusers import StableDiffusion3Pipeline
from huggingface_hub import login

# Retrieve the token from the environment variable (set as a Space secret).
token = os.getenv("HF_TOKEN")
if token:
    login(token=token)  # Log in with the retrieved token
else:
    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")

# Load the Stable Diffusion 3.5 model (default dtype; CPU inference).
model_id = "stabilityai/stable-diffusion-3.5-large"
pipe = StableDiffusion3Pipeline.from_pretrained(model_id)
pipe.to("cpu")  # Ensuring it runs on CPU

# Path to the LoRA weights file (expected to be a plain tensor state dict).
lora_model_path = "./lora_model.pth"


def load_lora_model(pipe, lora_model_path):
    """Merge LoRA weight deltas into the pipeline's denoiser in place.

    Args:
        pipe: A loaded StableDiffusion3Pipeline.
        lora_model_path: Path to a .pth state dict whose keys match
            parameter names of the denoiser and whose values are additive
            deltas. (Assumed format — TODO confirm against how the file
            was exported.)

    Returns:
        The same pipeline, with matching parameters updated in place.
    """
    # weights_only=True prevents arbitrary-code execution via pickled
    # objects; safe because we only expect a tensor state dict here.
    lora_weights = torch.load(lora_model_path, map_location="cpu", weights_only=True)

    # SD3 is an MMDiT model: the denoiser is `pipe.transformer`, not
    # `pipe.unet` (which does not exist on StableDiffusion3Pipeline).
    with torch.no_grad():
        for name, param in pipe.transformer.named_parameters():
            if name in lora_weights:
                param.data += lora_weights[name]
    return pipe


# Load and apply the LoRA model weights.
pipe = load_lora_model(pipe, lora_model_path)


def generate_image(prompt, seed=None):
    """Generate one image from a text prompt.

    Args:
        prompt: Text prompt for the diffusion model.
        seed: Optional seed for reproducibility. Gradio's Number component
            delivers a float, so it is cast to int before seeding.

    Returns:
        A PIL.Image produced by the pipeline.
    """
    # Use a dedicated generator instead of mutating the global RNG state.
    generator = None
    if seed is not None:
        generator = torch.Generator().manual_seed(int(seed))
    # SD3 requires height/width divisible by 16 (vae_scale_factor *
    # patch_size); 1080 is rejected, so use 1088, the nearest valid size.
    image = pipe(prompt, height=1088, width=1088, generator=generator).images[0]
    return image


# Gradio interface: prompt + optional seed -> generated image.
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Enter your prompt"),  # For the prompt
        gr.Number(label="Enter a seed (optional)", value=None),  # For the seed
    ],
    outputs="image",
)

if __name__ == "__main__":
    iface.launch()