Update app.py
app.py
CHANGED
@@ -9,20 +9,38 @@ import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 
-
 DESCRIPTIONx = """## STABLE HAMSTER
 
+Drop your best results in the community: [rb.gy/klkbs7](http://rb.gy/klkbs7)
 """
 
 
-
+style_list = [
+    {
+        "name": "3840 x 2160",
+        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "2560 x 1440",
+        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "3D Model",
+        "prompt": "professional 3d model {prompt}. octane render, highly detailed, volumetric, dramatic lighting",
+        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
+    },
+]
+
+#User -- Env -- .os -- Mode_Repo
+
 MODEL_ID = os.getenv("MODEL_REPO")
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
+BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 
-# Determine device and load model outside of function for efficiency
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 pipe = StableDiffusionXLPipeline.from_pretrained(
     MODEL_ID,
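Every style template added above carries a {prompt} placeholder that is filled by plain string replacement. A minimal sketch of that expansion (the template string is taken from the "3840 x 2160" entry; the example prompt is made up):

# Illustration only: expanding one of the style templates above.
template = ("hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, "
            "high-resolution, sharp, vibrant colors, photorealistic")
print(template.replace("{prompt}", "a hamster astronaut"))
# hyper-realistic 8K image of a hamster astronaut. ultra-detailed, lifelike, ...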
@@ -32,16 +50,26 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 ).to(device)
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-#
+# potential speedup
 if USE_TORCH_COMPILE:
     pipe.compile()
 
-# CPU offloading for
+# CPU offloading for Bigger RAM
 if ENABLE_CPU_OFFLOAD:
     pipe.enable_model_cpu_offload()
 
 MAX_SEED = np.iinfo(np.int32).max
 
+styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+STYLE_NAMES = list(styles.keys())
+DEFAULT_STYLE_NAME = "3840 x 2160"
+
+def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+    if not negative:
+        negative = ""
+    return p.replace("{prompt}", positive), n + negative
+
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name)
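The new apply_style helper looks the chosen name up in styles and falls back to DEFAULT_STYLE_NAME for unknown names. Note that the return expression n + negative glues a user-supplied negative prompt directly onto the style's own one with no separator, and the Tuple annotation assumes typing.Tuple is imported elsewhere in app.py (not shown in this diff). A hand-worked sketch of its behavior:

# Values worked out by hand from the definitions above, not run output.
p, n = apply_style("3D Model", "a vintage motorcycle", "text, watermark")
# p -> "professional 3d model a vintage motorcycle. octane render, highly detailed, volumetric, dramatic lighting"
# n -> "ugly, deformed, noisy, low poly, blurry, paintingtext, watermark"
apply_style("no such style", "a cat")  # falls back to the "3840 x 2160" style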
@@ -65,8 +93,10 @@ def generate(
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     num_images: int = 1, # Number of images to generate
+    style: str = DEFAULT_STYLE_NAME,
     progress=gr.Progress(track_tqdm=True),
 ):
+    prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device=device).manual_seed(seed)
 
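Because the new style parameter has a default, callers that predate this commit and omit it keep working; within the Gradio app itself the value always arrives positionally from the inputs list extended in the last hunk. A toy sketch of the backward-compatible default (hypothetical stub, not the real generate):

# Hypothetical stub illustrating the default-argument behavior.
def generate_stub(prompt: str, style: str = "3840 x 2160") -> str:
    return f"{style}: {prompt}"

print(generate_stub("a hamster"))              # default style
print(generate_stub("a hamster", "3D Model"))  # explicit style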
@@ -184,6 +214,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             step=1,
             value=8,
         )
+        style_selection = gr.Radio(
+            show_label=True,
+            container=True,
+            interactive=True,
+            choices=STYLE_NAMES,
+            value=DEFAULT_STYLE_NAME,
+            label="Image Style",
+        )
 
         gr.Examples(
             examples=examples,
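The Radio component simply hands its selected string to the handler. A self-contained sketch of the same pattern (hypothetical demo, not the app's actual layout):

import gradio as gr

def echo_style(style: str) -> str:
    return f"selected style: {style}"

with gr.Blocks() as sketch:
    radio = gr.Radio(choices=["3840 x 2160", "2560 x 1440", "3D Model"],
                     value="3840 x 2160", label="Image Style")
    out = gr.Textbox()
    radio.change(fn=echo_style, inputs=radio, outputs=out)
# sketch.launch()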
@@ -215,7 +253,8 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-            num_images
+            num_images,
+            style_selection,
         ],
         outputs=[result, seed],
         api_name="run",
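Gradio passes the inputs list to the handler positionally, so appending style_selection here is what routes the radio value into generate()'s new style parameter, and the trailing comma added after num_images is what made the extension a one-line change. A toy illustration of that tail mapping:

# Illustrative only: the tail of the inputs list lines up positionally
# with the tail of generate()'s signature.
def handler(randomize_seed: bool, num_images: int, style: str) -> str:
    return f"n={num_images}, style={style}, randomize_seed={randomize_seed}"

tail = [False, 1, "3840 x 2160"]  # randomize_seed, num_images, style_selection
print(handler(*tail))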