prithivMLmods committed
Commit e14e6d1 · verified · 1 Parent(s): f106055

Update app.py

Files changed (1):
  1. app.py +106 -159
app.py CHANGED
@@ -1,27 +1,27 @@
 import gradio as gr
-import spaces
 import numpy as np
-import random
-from diffusers import DiffusionPipeline
-import torch
 from PIL import Image

 device = "cuda" if torch.cuda.is_available() else "cpu"
 model_repo_id = "stabilityai/stable-diffusion-3.5-large-turbo"
-
 torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32

 pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
-
 pipe.load_lora_weights("strangerzonehf/SD3.5-Turbo-Portrait-LoRA", weight_name="SD3.5-Turbo-Portrait.safetensors")
-trigger_word = "Turbo Portrait"
 pipe.fuse_lora(lora_scale=1.0)

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024

-# Define styles
 style_list = [
     {
         "name": "3840 x 2160",
@@ -45,178 +45,125 @@ style_list = [
     },
 ]

-STYLE_NAMES = [style["name"] for style in style_list]
-DEFAULT_STYLE_NAME = STYLE_NAMES[0]

-grid_sizes = {
-    "2x1": (2, 1),
-    "1x2": (1, 2),
-    "2x2": (2, 2),
-    "2x3": (2, 3),
-    "3x2": (3, 2),
-    "1x1": (1, 1)
-}

 @spaces.GPU(duration=60)
-def infer(
     prompt,
-    negative_prompt="",
-    seed=42,
-    randomize_seed=False,
-    width=1024,
-    height=1024,
-    guidance_scale=7.5,
-    num_inference_steps=10,
-    style="Style Zero",
-    grid_size="1x1",
-    progress=gr.Progress(track_tqdm=True),
 ):
     selected_style = next(s for s in style_list if s["name"] == style)
     styled_prompt = selected_style["prompt"].format(prompt=prompt)
-    styled_negative_prompt = selected_style["negative_prompt"]
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    grid_size_x, grid_size_y = grid_sizes.get(grid_size, (1, 1))
-    num_images = grid_size_x * grid_size_y
-
-    options = {
-        "prompt": styled_prompt,
-        "negative_prompt": styled_negative_prompt,
-        "guidance_scale": guidance_scale,
-        "num_inference_steps": num_inference_steps,
-        "width": width,
-        "height": height,
-        "generator": generator,
-        "num_images_per_prompt": num_images,
-    }
-
-    torch.cuda.empty_cache()  # Clear GPU memory
-    result = pipe(**options)
-
-    grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))
-
-    for i, img in enumerate(result.images[:num_images]):
-        grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))
-
-    return grid_img, seed
-
-examples = [
-    "A tiny astronaut hatching from an egg on the moon, 4k, planet theme",
-    "An anime-style illustration of a delicious, golden-brown wiener schnitzel on a plate, served with fresh lemon slices, parsley --style raw5",
-    "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K, Photo-Realistic",
-    "A cat holding a sign that says hello world --ar 85:128 --v 6.0 --style raw"
-]
-
 css = '''
-.gradio-container{max-width: 585px !important}
-h1{text-align:center}
-footer {
-    visibility: hidden
 }
 '''

-with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("## SD3.5 Turbo Portrait")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-
-        with gr.Row(visible=True):
-            grid_size_selection = gr.Dropdown(
-                choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
-                value="1x1",
-                label="Grid Size"
-            )
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )

-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

             with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=512,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=512,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,
                )

-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=7.5,
-                    step=0.1,
-                    value=0.0,
-                )

-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
                     minimum=1,
-                    maximum=50,
                     step=1,
-                    value=8,
-                )
-
-            style_selection = gr.Radio(
-                show_label=True,
-                container=True,
-                interactive=True,
-                choices=STYLE_NAMES,
-                value=DEFAULT_STYLE_NAME,
-                label="Quality Style",
             )

-    gr.Examples(examples=examples,
-                inputs=[prompt],
-                outputs=[result, seed],
-                fn=infer,
-                cache_examples=False)

     gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
         inputs=[
             prompt,
             negative_prompt,
             seed,
             randomize_seed,
@@ -224,11 +171,11 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             height,
             guidance_scale,
             num_inference_steps,
-            style_selection,
-            grid_size_selection,
         ],
-        outputs=[result, seed],
     )

 if __name__ == "__main__":
-    demo.launch(ssr_mode=False, show_error=True)
+import os
+import random
+import uuid
 import gradio as gr
 import numpy as np
 from PIL import Image
+import torch
+from diffusers import DiffusionPipeline
+import spaces

+# Setup
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model_repo_id = "stabilityai/stable-diffusion-3.5-large-turbo"
 torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32

 pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
 pipe.load_lora_weights("strangerzonehf/SD3.5-Turbo-Portrait-LoRA", weight_name="SD3.5-Turbo-Portrait.safetensors")
 pipe.fuse_lora(lora_scale=1.0)

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024

+# Style presets
 style_list = [
     {
         "name": "3840 x 2160",
     },
 ]

+STYLE_NAMES = [s["name"] for s in style_list]

+def randomize_seed_fn(seed, randomize):
+    return random.randint(0, MAX_SEED) if randomize else seed
+
+def save_image(img):
+    filename = str(uuid.uuid4()) + ".png"
+    img.save(filename)
+    return filename

 @spaces.GPU(duration=60)
+def generate_images(
     prompt,
+    style,
+    negative_prompt,
+    seed,
+    randomize_seed,
+    width,
+    height,
+    guidance_scale,
+    num_inference_steps,
+    num_images,
+    progress=gr.Progress(track_tqdm=True)
 ):
+    seed = randomize_seed_fn(seed, randomize_seed)
+    generator = torch.Generator(device=device).manual_seed(seed)
+
     selected_style = next(s for s in style_list if s["name"] == style)
     styled_prompt = selected_style["prompt"].format(prompt=prompt)
+    styled_negative_prompt = selected_style["negative_prompt"] if not negative_prompt else negative_prompt
+
+    images = []
+    for _ in range(num_images):
+        image = pipe(
+            prompt=styled_prompt,
+            negative_prompt=styled_negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            generator=generator
+        ).images[0]
+        images.append(image)
+
+    image_paths = [save_image(img) for img in images]
+    return image_paths, seed
+
+# CSS & Interface
 css = '''
+.gradio-container {
+    max-width: 150%;
+    margin: 0 auto;
 }
+h1 { text-align: center; }
+footer { visibility: hidden; }
 '''

+examples = [
+    "portrait photo of a futuristic astronaut",
+    "macro shot of a water droplet on a leaf",
+    "hyper-realistic food photography of a burger",
+    "cyberpunk city at night, rain, neon lights",
+    "ultra detailed fantasy landscape with dragons",
+]

+with gr.Blocks(css=css, theme="YTheme/GMaterial") as demo:
+    gr.Markdown("## SD3.5 Turbo: Text to Image [10-Images]")

+    with gr.Row():
+        with gr.Column(scale=1):
             with gr.Row():
+                prompt = gr.Text(
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    container=False,
                 )
+                run_button = gr.Button("Run", scale=0, variant="primary")

+            result_gallery = gr.Gallery(show_label=False, format="png", columns=2, object_fit="contain")

+            with gr.Accordion("Advanced Settings", open=False):
+                num_images = gr.Slider(
+                    label="Number of Images",
                     minimum=1,
+                    maximum=10,
+                    value=5,
                     step=1,
                 )
+                style = gr.Dropdown(label="Select Style", choices=STYLE_NAMES, value=STYLE_NAMES[0])

+                negative_prompt = gr.Text(
+                    label="Negative Prompt",
+                    max_lines=4,
+                    lines=3,
+                    value="cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly"
+                )
+                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                with gr.Row():
+                    width = gr.Slider(label="Width", minimum=512, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
+                    height = gr.Slider(label="Height", minimum=512, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
+                with gr.Row():
+                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=15, step=0.5, value=7.5)
+                    num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=30, step=1, value=10)
+
+        with gr.Column(scale=1):
+            gr.Examples(
+                examples=examples,
+                inputs=prompt,
+                cache_examples=False,
+            )

     gr.on(
+        triggers=[prompt.submit, run_button.click],
+        fn=generate_images,
         inputs=[
             prompt,
+            style,
             negative_prompt,
             seed,
             randomize_seed,
             height,
             guidance_scale,
             num_inference_steps,
+            num_images
         ],
+        outputs=[result_gallery, seed],
+        api_name="generate"
     )

 if __name__ == "__main__":
+    demo.queue(max_size=40).launch(ssr_mode=False)
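
Usage note: because the new handler is registered with api_name="generate", the updated Space can also be driven programmatically. Below is a minimal sketch using gradio_client; the Space id and the argument values are assumptions, and inputs are passed positionally in the same order as the gr.on() inputs list (prompt, style, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images).

# Hypothetical client-side call; the Space id below is an assumption, not part of this commit.
from gradio_client import Client

client = Client("prithivMLmods/SD3.5-Turbo-Portrait")  # assumed Space id
gallery, used_seed = client.predict(
    "portrait photo of a futuristic astronaut",  # prompt
    "3840 x 2160",                               # style preset name
    "",                                          # negative_prompt (empty -> style default is used)
    0,                                           # seed
    True,                                        # randomize_seed
    1024,                                        # width
    1024,                                        # height
    7.5,                                         # guidance_scale
    10,                                          # num_inference_steps
    2,                                           # num_images
    api_name="/generate",
)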