# Hugging Face Spaces demo app.
# (The "Spaces: / Sleeping / Sleeping" lines were page-scrape residue, not code.)
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch

# Load Stable Diffusion v1.4 once at import time.
# float32 is kept so the CPU fallback below still works (fp16 is CUDA-only).
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float32
)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# BUG FIX: the safety-checker contract is (images, has_nsfw_concepts), where the
# second element is a *per-image list of bools*. The original returned a bare
# False, which breaks downstream code that iterates or any()'s the flag list.
pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
def infer(prompt, guidance_scale, num_inference_steps):
    """Generate one image for *prompt* with the module-level pipeline.

    Args:
        prompt: text prompt from the UI textbox.
        guidance_scale: classifier-free guidance weight (slider, float).
        num_inference_steps: denoising step count (slider; may arrive as float).

    Returns:
        A PIL image suitable for a ``gr.Image(type="pil")`` output.

    Raises:
        gr.Error: wraps any pipeline failure so Gradio shows it in the UI.
    """
    # Gradio sliders deliver numbers that may be floats; diffusers requires an
    # integer step count.
    steps = int(num_inference_steps)
    try:
        with torch.no_grad():
            result = pipe(
                prompt,
                guidance_scale=guidance_scale,
                num_inference_steps=steps,
            )
        return result.images[0]
    except Exception as e:
        # BUG FIX: the original returned an error *string* to a gr.Image output,
        # which Gradio cannot render (it would be treated as an image path).
        # Raising gr.Error is the supported way to surface the message.
        raise gr.Error(f"Error: {e}") from e
# Assemble the web UI and serve it.
with gr.Blocks() as demo:
    gr.Markdown("π Hyper-Sketch")

    with gr.Row():
        prompt_box = gr.Textbox(
            label="Prompt", placeholder="Enter your prompt here...", lines=2
        )

    with gr.Row():
        guidance_slider = gr.Slider(
            1.0, 20.0, value=7.5, step=0.1, label="Guidance Scale"
        )
        # Step count capped at 50 to keep generation time reasonable.
        steps_slider = gr.Slider(10, 50, value=20, step=1, label="Inference Steps")

    run_button = gr.Button("Generate Image")
    result_image = gr.Image(label="Generated Image", type="pil")

    run_button.click(
        infer,
        inputs=[prompt_box, guidance_slider, steps_slider],
        outputs=[result_image],
    )

demo.launch(share=True)