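# Hyper-Sketch: a Gradio Blocks demo that turns a text prompt into an image with
# Stable Diffusion v1.4, exposing guidance scale and inference steps as sliders.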
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch


# Load Stable Diffusion v1.4 once at startup; float32 keeps the weights usable on CPU as well as GPU.
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float32)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")


# Disable the NSFW safety checker; the pipeline expects a list of per-image flags, not a single bool.
pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))


def infer(prompt, guidance_scale, num_inference_steps):
    # Run the diffusion pipeline without tracking gradients and return the first generated PIL image.
    with torch.no_grad():
        try:
            image = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
            return image
        except Exception as e:
            # Surface failures in the UI instead of returning a string to an Image output.
            raise gr.Error(str(e))


with gr.Blocks() as demo:
    gr.Markdown("🚀 Hyper-Sketch")

    with gr.Row():
        prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", lines=2)

    with gr.Row():
        guidance_scale = gr.Slider(1.0, 20.0, value=7.5, step=0.1, label="Guidance Scale")
        num_inference_steps = gr.Slider(10, 50, value=20, step=1, label="Inference Steps")  # Lowered max steps

    generate_button = gr.Button("Generate Image")
    output_image = gr.Image(label="Generated Image", type="pil") 

    generate_button.click(infer, inputs=[prompt, guidance_scale, num_inference_steps], outputs=[output_image])

demo.launch(share=True)