Spaces:
Sleeping
Sleeping
File size: 1,833 Bytes
bfd543d d8e57d4 bfd543d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
# SDXL base checkpoint. NOTE: the fp16 weights for this repo are published as a
# file *variant*, not a git revision — there is no "fp16" branch, so the
# original revision="fp16" fails to resolve; variant="fp16" is the correct knob.
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionXLPipeline.from_pretrained(
    model_id,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    variant="fp16",  # was revision="fp16" — selects the fp16 weight files
)
# Module-global pipeline; requires a CUDA-capable GPU.
pipe = pipe.to("cuda")
def generate_image(
    prompt: str,
    negative_prompt: str = "",
    height: int = 512,
    width: int = 512,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    num_images_per_prompt: int = 1,
    seed=None,
) -> Tuple[Image.Image, str]:
    """Generate one image with the module-global SDXL pipeline and save it.

    Args:
        prompt: Text prompt for the diffusion model.
        negative_prompt: Concepts to steer away from (empty = none).
        height, width: Output resolution in pixels.
        num_inference_steps: Number of denoising steps.
        guidance_scale: Classifier-free guidance strength.
        num_images_per_prompt: Images generated per call; only the first
            is returned (the rest are discarded by ``.images[0]``).
        seed: Optional int for reproducible generation. When None (the
            default, matching the original behavior) a random seed is drawn.

    Returns:
        Tuple of (PIL image, path of the saved PNG in the temp directory).
    """
    import os
    import tempfile  # function-scope imports keep this fix self-contained

    # Draw a random seed only when the caller did not pin one, so results
    # can now be reproduced without changing existing call sites.
    if seed is None:
        seed = random.randint(0, 2**32 - 1)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
        generator=generator,
    ).images[0]
    image_id = str(uuid.uuid4())
    # Portable temp directory instead of the hard-coded "/tmp" (which does
    # not exist on Windows and ignores TMPDIR overrides).
    image_path = os.path.join(tempfile.gettempdir(), f"{image_id}.png")
    image.save(image_path)
    return image, image_path
def gradio_interface():
    """Build and return the Gradio Blocks UI for SDXL image generation."""

    def _on_generate(prompt_text, negative_text):
        # generate_image returns (image, saved_path); only the image is shown.
        return generate_image(prompt_text, negative_text)[0]

    with gr.Blocks(css=css) as demo:
        gr.Markdown("## Gere imagens usando Stable Diffusion XL")
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", placeholder="Digite o prompt aqui...")
                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Digite o negative prompt aqui...")
                run_button = gr.Button("Gerar Imagem")
            with gr.Column():
                result = gr.Image(label="Imagem Gerada")
        run_button.click(fn=_on_generate, inputs=[prompt, negative_prompt], outputs=result)
    return demo
if __name__ == "__main__":
    # Build the UI and start the local Gradio server.
    gradio_interface().launch()