"""Gradio web app that generates images with Stable Diffusion XL (SDXL base 1.0)."""
import random
import tempfile
import uuid
from pathlib import Path
from typing import Tuple

import gradio as gr
import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline
from PIL import Image

model_id = "stabilityai/stable-diffusion-xl-base-1.0"

# Styling for the Gradio Blocks UI. The original referenced `css` without
# defining it, which raised NameError at launch; an empty string keeps the
# default Gradio look.
css = ""

scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
# NOTE: SDXL ships its half-precision weights as the "fp16" *variant* of the
# main branch, not as a separate repo revision — `revision="fp16"` fails to
# resolve for this model, `variant="fp16"` is the supported flag.
pipe = StableDiffusionXLPipeline.from_pretrained(
    model_id,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = pipe.to("cuda")


def generate_image(
    prompt: str,
    negative_prompt: str = "",
    height: int = 512,
    width: int = 512,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    num_images_per_prompt: int = 1,
) -> Tuple[Image.Image, str]:
    """Run the SDXL pipeline once and save the first generated image to disk.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Concepts to steer the generation away from.
        height: Output image height in pixels.
        width: Output image width in pixels.
        num_inference_steps: Number of denoising steps.
        guidance_scale: Classifier-free guidance strength.
        num_images_per_prompt: Images generated per call (only the first is kept).

    Returns:
        A tuple ``(image, image_path)`` with the PIL image and the path of the
        PNG written to the system temp directory.
    """
    # Fresh random seed per call so repeated clicks give different images.
    generator = torch.Generator(device="cuda").manual_seed(random.randint(0, 2**32 - 1))
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
        generator=generator,
    ).images[0]
    # Use the platform temp dir instead of hard-coded "/tmp" for portability.
    image_path = str(Path(tempfile.gettempdir()) / f"{uuid.uuid4()}.png")
    image.save(image_path)
    return image, image_path


def gradio_interface():
    """Build and return the Gradio Blocks UI wired to :func:`generate_image`."""
    with gr.Blocks(css=css) as demo:
        gr.Markdown("## Gere imagens usando Stable Diffusion XL")
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", placeholder="Digite o prompt aqui...")
                negative_prompt = gr.Textbox(
                    label="Negative Prompt",
                    placeholder="Digite o negative prompt aqui...",
                )
                run_button = gr.Button("Gerar Imagem")
            with gr.Column():
                result = gr.Image(label="Imagem Gerada")
        # The UI only displays the image; the saved path is dropped here.
        run_button.click(
            fn=lambda p, np: generate_image(p, np)[0],
            inputs=[prompt, negative_prompt],
            outputs=result,
        )
    return demo


if __name__ == "__main__":
    demo = gradio_interface()
    demo.launch()