Spaces: Running on Zero
Update app.py
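In brief: this commit turns the stock Stable Diffusion 3 demo into a Japanese-language Space. It adds a Japanese title and description, loads stabilityai/stable-diffusion-3-medium-diffusers at import time with an access token, moves the pipeline to CUDA inside the ZeroGPU handler, and removes the negative-prompt toggle, seed randomization, resolution binning, the named API endpoint, and all but one example prompt.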
app.py CHANGED
```diff
@@ -11,36 +11,28 @@ import spaces
 import torch
 from diffusers import StableDiffusion3Pipeline, DPMSolverMultistepScheduler, AutoencoderKL
 
-
+DESCRIPTION = """# 日本語で入力できるStable Diffusion 3"""
 
-
-
-
-
+pipe = StableDiffusion3Pipeline.from_pretrained(
+    "stabilityai/stable-diffusion-3-medium-diffusers",
+    torch_dtype=torch.float16,
+    token=os.getenv("TOKEN")
+)
 
 @spaces.GPU()
 def generate(
     prompt: str,
     negative_prompt: str = "",
-    use_negative_prompt: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
     guidance_scale: float = 7,
-    randomize_seed: bool = False,
     num_inference_steps=30,
-    use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
 ):
-    pipe.to(
-    seed = int(randomize_seed_fn(seed, randomize_seed))
+    pipe = pipe.to("cuda")
     generator = torch.Generator().manual_seed(seed)
 
-    #pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
-
-    if not use_negative_prompt:
-        negative_prompt = None  # type: ignore
-
     output = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
```
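The added lines load Stable Diffusion 3 Medium once at import time and move it onto the GPU inside the `@spaces.GPU()` handler, the usual ZeroGPU pattern. One caveat: inside `generate`, the assignment `pipe = pipe.to("cuda")` makes `pipe` a local name for the whole function, so Python raises `UnboundLocalError` before the right-hand side is even evaluated. A minimal sketch of the working pattern, assuming a `TOKEN` secret is configured on the Space:

```python
import os

import spaces
import torch
from diffusers import StableDiffusion3Pipeline

# Load once at import time; ZeroGPU Spaces start on CPU.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    torch_dtype=torch.float16,
    token=os.getenv("TOKEN"),  # assumes a TOKEN secret on the Space
)

@spaces.GPU()
def generate(prompt: str, negative_prompt: str = "", seed: int = 0):
    # .to() returns the pipeline itself, so no rebinding is needed;
    # `pipe = pipe.to("cuda")` would shadow the global and raise
    # UnboundLocalError.
    pipe.to("cuda")
    generator = torch.Generator().manual_seed(seed)
    return pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        generator=generator,
    ).images
```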
```diff
@@ -57,19 +49,6 @@ def generate(
 
 examples = [
     "A red sofa on top of a white building.",
-    "A cardboard which is large and sits on a theater stage.",
-    "A painting of an astronaut riding a pig wearing a tutu holding a pink umbrella.",
-    "Studio photograph closeup of a chameleon over a black background.",
-    "Closeup portrait photo of beautiful goth woman, makeup.",
-    "A living room, bright modern Scandinavian style house, large windows.",
-    "Portrait photograph of an anthropomorphic tortoise seated on a New York City subway train.",
-    "Batman, cute modern Disney style, Pixar 3d portrait, ultra detailed, gorgeous, 3d zbrush, trending on dribbble, 8k render.",
-    "Cinnamon bun on the plate, watercolor painting, detailed, brush strokes, light palette, light, cozy.",
-    "A lion, colorful, low-poly, cyan and orange eyes, poly-hd, 3d, low-poly game art, polygon mesh, jagged, blocky, wireframe edges, centered composition.",
-    "Long exposure photo of Tokyo street, blurred motion, streaks of light, surreal, dreamy, ghosting effect, highly detailed.",
-    "A glamorous digital magazine photoshoot, a fashionable model wearing avant-garde clothing, set in a futuristic cyberpunk roof-top environment, with a neon-lit city background, intricate high fashion details, backlit by vibrant city glow, Vogue fashion photography.",
-    "Masterpiece, best quality, girl, collarbone, wavy hair, looking at viewer, blurry foreground, upper body, necklace, contemporary, plain pants, intricate, print, pattern, ponytail, freckles, red hair, dappled sunlight, smile, happy."
-
 ]
 
 css = '''
```
```diff
@@ -82,7 +61,7 @@ with gr.Blocks(css=css) as demo:
     gr.HTML(
         """
         <h1 style='text-align: center'>
-        Stable Diffusion 3 Medium
+        日本語で入力できるStable Diffusion 3 Medium
         </h1>
         """
     )
```
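Both the DESCRIPTION added earlier and this heading read, in English, roughly "Stable Diffusion 3 Medium that accepts Japanese input".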
```diff
@@ -104,7 +83,6 @@ with gr.Blocks(css=css) as demo:
         result = gr.Gallery(label="Result", elem_id="gallery", show_label=False)
         with gr.Accordion("Advanced options", open=False):
             with gr.Row():
-                use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
                 negative_prompt = gr.Text(
                     label="Negative prompt",
                     max_lines=1,
```
```diff
@@ -127,7 +105,6 @@ with gr.Blocks(css=css) as demo:
                 value=30,
             )
 
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
             with gr.Row():
                 guidance_scale = gr.Slider(
                     label="Guidance Scale",
```
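Removing the "Randomize seed" checkbox, together with the `randomize_seed_fn` call deleted in the first hunk, means generation always uses the literal value of the seed input. The helper itself does not appear in this diff; in the stock demo templates it is typically defined along these lines (a sketch; `MAX_SEED` is an assumption):

```python
import random

import numpy as np

MAX_SEED = np.iinfo(np.int32).max  # assumed bound, as in the stock demos

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Pick a fresh random seed when the checkbox is ticked,
    # otherwise keep the user-supplied one.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
```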
```diff
@@ -145,13 +122,6 @@ with gr.Blocks(css=css) as demo:
         cache_examples=CACHE_EXAMPLES,
     )
 
-    use_negative_prompt.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_negative_prompt,
-        outputs=negative_prompt,
-        api_name=False,
-    )
-
     gr.on(
         triggers=[
             prompt.submit,
```
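The deleted `.change` handler is the usual Gradio idiom for tying one component's visibility to a checkbox; with both the checkbox (previous hunk) and this handler gone, the negative prompt box is always visible and its text is always passed to the pipeline. The removed pattern, as a self-contained sketch:

```python
import gradio as gr

with gr.Blocks() as demo:
    use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
    negative_prompt = gr.Text(label="Negative prompt", max_lines=1)

    # Show or hide the textbox whenever the checkbox flips.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )
```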
```diff
@@ -162,14 +132,11 @@ with gr.Blocks(css=css) as demo:
         inputs=[
             prompt,
             negative_prompt,
-            use_negative_prompt,
             seed,
             guidance_scale,
-            randomize_seed,
             steps,
         ],
         outputs=[result],
-        api_name="run",
     )
 
 if __name__ == "__main__":
```
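Dropping `api_name="run"` removes the stable API endpoint name for this event. One thing worth checking against the full file: Gradio binds `inputs` to the handler positionally, and `generate` as shown declares `width` and `height` between `seed` and `guidance_scale`, so the wired guidance and steps values would land on `width` and `height`. If the hunks reflect the whole file, a reordering along these lines (a sketch, reusing the names from above) would align them:

```python
# Hypothetical reordering of generate so the positional binding from
# gr.on lines up with the five wired components; width and height keep
# their defaults since no sliders are wired for them.
@spaces.GPU()
def generate(
    prompt: str,
    negative_prompt: str = "",
    seed: int = 0,
    guidance_scale: float = 7,
    num_inference_steps: int = 30,
    width: int = 1024,
    height: int = 1024,
    progress=gr.Progress(track_tqdm=True),  # injected by Gradio, not wired
):
    ...
```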