|
import os

import gradio as gr
import torch
from diffusers import DiffusionPipeline, StableDiffusionPipeline
from huggingface_hub import login
|
|
|
|
|
# Authenticate with the Hugging Face Hub using the Space's secret token.
# Fail fast with a clear message when the secret is missing.
token = os.getenv("HF_TOKEN")

if not token:
    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")

login(token=token)
|
|
|
|
|
model_id = "stabilityai/stable-diffusion-3.5-large"

# SD 3.5 is an MMDiT model and is NOT loadable via StableDiffusionPipeline;
# DiffusionPipeline inspects the repo config and instantiates the correct
# class (StableDiffusion3Pipeline) automatically.
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)

# Fall back to CPU so the script can still start on a machine without a GPU
# (previously a hard pipe.to("cuda") crashed when CUDA was unavailable).
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe.to(device)

# Remote LoRA delta weights (.pth state dict); fetched by load_lora_model.
lora_model_path = "https://huggingface.co/spaces/DonImages/Testing2/resolve/main/lora_model.pth"
|
|
|
|
|
def load_lora_model(pipe, lora_model_path):
    """Add LoRA weight deltas onto matching parameters of *pipe*, in place.

    Parameters
    ----------
    pipe:
        Either an object exposing ``named_parameters()`` (e.g. an
        ``nn.Module``) or a diffusers pipeline whose parameterized
        sub-modules are reachable through ``.components``.
    lora_model_path : str
        Local filesystem path or http(s) URL of a ``.pth`` state dict
        mapping parameter names to additive delta tensors.

    Returns
    -------
    The same *pipe* object, mutated in place.
    """
    # torch.load cannot open a URL directly; remote files go through
    # torch.hub, which also caches the download locally.
    # NOTE(security): .pth files are pickles — only load from trusted sources.
    if lora_model_path.startswith(("http://", "https://")):
        lora_weights = torch.hub.load_state_dict_from_url(
            lora_model_path, map_location="cpu"
        )
    else:
        # map_location="cpu" so loading works on CUDA-less machines; tensors
        # are moved to each parameter's device right before the add below.
        lora_weights = torch.load(lora_model_path, map_location="cpu")

    # Diffusers pipelines are not nn.Modules and have no named_parameters();
    # in that case walk every parameterized component instead.
    if hasattr(pipe, "named_parameters"):
        named = list(pipe.named_parameters())
    else:
        named = [
            (f"{comp_name}.{param_name}", param)
            for comp_name, comp in getattr(pipe, "components", {}).items()
            if hasattr(comp, "named_parameters")
            for param_name, param in comp.named_parameters()
        ]

    for name, param in named:
        delta = lora_weights.get(name)
        if delta is not None:
            # Match device/dtype so the in-place add never mixes cpu/cuda
            # or fp16/fp32 tensors.
            param.data += delta.to(device=param.device, dtype=param.dtype)

    return pipe
|
|
|
|
|
# Merge the remote LoRA deltas into the pipeline's weights once, at startup.
pipe = load_lora_model(pipe, lora_model_path)
|
|
|
|
|
def generate_image(prompt):
    """Run the module-level pipeline on *prompt* and return the first image."""
    result = pipe(prompt)
    return result.images[0]
|
|
|
|
|
# Minimal Gradio UI: a single text prompt in, the generated image out.
iface = gr.Interface(fn=generate_image, inputs="text", outputs="image")

iface.launch()
|
|