prithivMLmods committed on
Commit f393f66 · verified · 1 Parent(s): c6ad504

Delete app.py

Files changed (1)
  1. app.py +0 -222
app.py DELETED
@@ -1,222 +0,0 @@
- import os
- import random
- import uuid
- import json
- import gradio as gr
- import numpy as np
- from PIL import Image
- import spaces
- import torch
- from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-
- DESCRIPTIONx = """## STABLE HAMSTER 🐹
-
- """
-
- css = '''
- .gradio-container{max-width: 560px !important}
- h1{text-align:center}
- footer {
-     visibility: hidden
- }
- '''
-
- examples = [
-     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
-     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
-     "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
-     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
-     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16",
- ]
-
-
- # SDXL model repository or path, read from the MODEL_VAL_PATH environment variable.
- MODEL_ID = os.getenv("MODEL_VAL_PATH")
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
- USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
- ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
- BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))  # Allow generating multiple images at once
-
- # Load the model once at startup, outside the request handler.
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- pipe = StableDiffusionXLPipeline.from_pretrained(
-     MODEL_ID,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     use_safetensors=True,
-     add_watermarker=False,
- ).to(device)
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
- # Optional torch.compile speedup for the UNet.
- if USE_TORCH_COMPILE:
-     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-
- # Optional CPU offloading to reduce GPU memory usage.
- if ENABLE_CPU_OFFLOAD:
-     pipe.enable_model_cpu_offload()
-
- MAX_SEED = np.iinfo(np.int32).max
-
- def save_image(img):
-     unique_name = str(uuid.uuid4()) + ".png"
-     img.save(unique_name)
-     return unique_name
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
- @spaces.GPU(duration=60, enable_queue=True)
- def generate(
-     prompt: str,
-     negative_prompt: str = "",
-     use_negative_prompt: bool = False,
-     seed: int = 1,
-     width: int = 1024,
-     height: int = 1024,
-     guidance_scale: float = 3,
-     num_inference_steps: int = 25,
-     randomize_seed: bool = False,
-     use_resolution_binning: bool = True,
-     num_images: int = 1,  # Number of images to generate
-     progress=gr.Progress(track_tqdm=True),
- ):
-     seed = int(randomize_seed_fn(seed, randomize_seed))
-     generator = torch.Generator(device=device).manual_seed(seed)
-
-     # Pipeline call options.
-     options = {
-         "prompt": [prompt] * num_images,
-         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
-         "width": width,
-         "height": height,
-         "guidance_scale": guidance_scale,
-         "num_inference_steps": num_inference_steps,
-         "generator": generator,
-         "output_type": "pil",
-     }
-
-     if use_resolution_binning:
-         options["use_resolution_binning"] = True
-
-     # Generate the requested images, potentially in several batches of BATCH_SIZE.
-     images = []
-     for i in range(0, num_images, BATCH_SIZE):
-         batch_options = options.copy()
-         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-         if batch_options.get("negative_prompt") is not None:
-             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-         images.extend(pipe(**batch_options).images)
-
-     image_paths = [save_image(img) for img in images]
-     return image_paths, seed
-
- with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-     gr.Markdown(DESCRIPTIONx)
-     with gr.Group():
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-         result = gr.Gallery(label="Result", columns=1, show_label=False)
-     with gr.Accordion("Advanced options", open=False, visible=False):
-         num_images = gr.Slider(
-             label="Number of Images",
-             minimum=1,
-             maximum=4,
-             step=1,
-             value=1,
-         )
-         with gr.Row():
-             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=5,
-                 lines=4,
-                 placeholder="Enter a negative prompt",
-                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                 visible=True,
-             )
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row(visible=True):
-             width = gr.Slider(
-                 label="Width",
-                 minimum=512,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=64,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=512,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=64,
-                 value=1024,
-             )
-         with gr.Row():
-             guidance_scale = gr.Slider(
-                 label="Guidance Scale",
-                 minimum=0.1,
-                 maximum=6,
-                 step=0.1,
-                 value=3.0,
-             )
-             num_inference_steps = gr.Slider(
-                 label="Number of inference steps",
-                 minimum=1,
-                 maximum=25,
-                 step=1,
-                 value=23,
-             )
-
-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         cache_examples=False,
-     )
-
-     use_negative_prompt.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt,
-         outputs=negative_prompt,
-         api_name=False,
-     )
-
-     gr.on(
-         triggers=[
-             prompt.submit,
-             negative_prompt.submit,
-             run_button.click,
-         ],
-         fn=generate,
-         inputs=[
-             prompt,
-             negative_prompt,
-             use_negative_prompt,
-             seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-             randomize_seed,
-             num_images,
-         ],
-         outputs=[result, seed],
-         api_name="run",
-     )
-
- if __name__ == "__main__":
-     demo.queue(max_size=40).launch()