import torch
from diffusers import StableDiffusion3Pipeline
from huggingface_hub import hf_hub_download, login
import os
import gradio as gr

# Retrieve the token from the environment variable
token = os.getenv("HF_TOKEN")  # Hugging Face token from the Space secret
if token:
    login(token=token)  # Log in with the retrieved token
else:
    raise ValueError(
        "Hugging Face token not found. Please set it as a repository secret in the Space settings."
    )

# Load the Stable Diffusion 3.5 model (SD 3.5 uses the SD3 pipeline class)
model_id = "stabilityai/stable-diffusion-3.5-large"
pipe = StableDiffusion3Pipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe.to("cuda")

# Download the LoRA weights from the Space so torch.load receives a local file
# (torch.load cannot read directly from an http(s) URL)
lora_model_path = hf_hub_download(
    repo_id="DonImages/Testing2",
    repo_type="space",
    filename="lora_model.pth",
)

# Custom method to load and apply LoRA weights to the Stable Diffusion pipeline.
# This is a conceptual merge: it assumes the .pth file holds additive weight
# deltas keyed by the transformer's parameter names. Adjust this section based
# on how your LoRA file is actually structured.
def load_lora_model(pipe, lora_model_path):
    # Load the LoRA weights (a PyTorch .pth state dict)
    lora_weights = torch.load(lora_model_path, map_location="cuda")

    # The pipeline object itself is not an nn.Module, so iterate over the
    # denoiser's parameters (the transformer component in SD 3.5) and merge
    # in any matching LoRA deltas.
    with torch.no_grad():
        for name, param in pipe.transformer.named_parameters():
            if name in lora_weights:
                param.add_(lora_weights[name].to(param.device, param.dtype))

    return pipe  # Return the updated pipeline

# Load and apply the LoRA model weights
pipe = load_lora_model(pipe, lora_model_path)

# Function to generate an image from a text prompt
def generate_image(prompt):
    image = pipe(prompt).images[0]
    return image

# Gradio interface
iface = gr.Interface(fn=generate_image, inputs="text", outputs="image")
iface.launch()