import os

import gradio as gr
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel, StableDiffusion3Pipeline
from huggingface_hub import login

# Authenticate with the Hugging Face Hub; the token comes from the HF_TOKEN
# secret configured in the Space settings.
token = os.getenv("HF_TOKEN")
if token:
    login(token=token)
else:
    raise ValueError(
        "Hugging Face token not found. "
        "Please set it as a repository secret in the Space settings."
    )
# Quantize the transformer weights to 4-bit NF4 to cut memory use; compute
# still runs in bfloat16 via the compute dtype.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
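
# Note: 4-bit loading relies on the bitsandbytes and accelerate packages
# being installed (e.g. listed in the Space's requirements.txt).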
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | model_id = "stabilityai/stable-diffusion-3.5-large" | 
					
						
						|  | model = SD3Transformer2DModel.from_pretrained( | 
					
						
						|  | model_id, | 
					
						
						|  | subfolder="transformer", | 
					
						
						|  | quantization_config=quant_config, | 
					
						
						|  | torch_dtype=torch.bfloat16 | 
					
						
						|  | ) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
pipe = StableDiffusion3Pipeline.from_pretrained(
    model_id,
    transformer=model,
    torch_dtype=torch.bfloat16,
)

# bitsandbytes 4-bit layers generally require a CUDA device, so the original
# pipe.to("cpu") will not work with the quantized transformer; pick the GPU
# when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe.to(device)
					
						
						|  |  | 
					
						
						|  |  | 
					
						
# Path to the LoRA checkpoint, assumed here to be a plain state dict of
# additive weight deltas keyed by parameter name.
lora_model_path = "./lora_model.pth"

def load_lora_model(pipe, lora_model_path):
    # Load the LoRA state dict onto the CPU first.
    lora_weights = torch.load(lora_model_path, map_location="cpu")

    # SD3 pipelines have a `transformer` rather than a `unet`; add each
    # stored delta to the matching transformer parameter.
    for name, param in pipe.transformer.named_parameters():
        if name in lora_weights:
            param.data += lora_weights[name].to(param.device, param.dtype)

    return pipe

pipe = load_lora_model(pipe, lora_model_path)
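
# Note: for LoRA checkpoints saved in the standard diffusers/PEFT format,
# the pipeline's built-in loader is the more robust option than manually
# adding deltas as above, e.g.:
# pipe.load_lora_weights("path/to/lora", weight_name="lora.safetensors")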
					
						
						|  |  | 
					
						
						|  |  | 
					
						
def generate_image(prompt):
    # Run the full diffusion pipeline and return the first generated image.
    image = pipe(prompt).images[0]
    return image
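
# pipe() also accepts the usual diffusers sampling knobs; for example
# (values here are illustrative, not tuned):
# image = pipe(prompt, num_inference_steps=28, guidance_scale=7.0).images[0]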
					
						
						|  |  | 
					
						
						|  |  | 
					
						
# Minimal Gradio UI: one text box for the prompt, one image output.
iface = gr.Interface(fn=generate_image, inputs="text", outputs="image")
iface.launch()
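
# On a Hugging Face Space this script runs automatically as app.py; locally,
# `python app.py` starts the server and prints the URL to open.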
					
						