Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Load the Stable Diffusion pipeline once at import time so every request
# reuses the same weights instead of reloading them per call.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float32
)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")  # Use GPU if available


# Inference function
def infer(prompt, guidance_scale=7.5, num_inference_steps=50):
    """Generate an image from *prompt* with the module-level pipeline.

    Args:
        prompt: Text description of the desired image.
        guidance_scale: Classifier-free guidance strength; higher values
            follow the prompt more closely.
        num_inference_steps: Number of denoising steps.

    Returns:
        The first generated image (PIL) from the pipeline output.
    """
    with torch.no_grad():  # Prevent unnecessary gradient calculations
        image = pipe(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
        ).images[0]
    return image


# Create Gradio Interface
with gr.Blocks() as demo:
    # NOTE(review): original file lines 16-19 fall outside both diff hunks,
    # so any header widgets defined there are not visible and are omitted.
    prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", lines=2)
    generate_button = gr.Button("Generate Image")

    output_image = gr.Image(label="Generated Image", type="pil")  # Fix output format

    generate_button.click(infer, inputs=[prompt], outputs=[output_image])

# Launch the app
demo.launch(share=True)  # Allows sharing link