prithivMLmods committed on
Commit
8fc2985
·
verified ·
1 Parent(s): 8fd66da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -4
app.py CHANGED
@@ -7,6 +7,8 @@ import random
7
  import uuid
8
  from typing import Tuple
9
  import numpy as np
 
 
10
 
11
  DESCRIPTION = """##
12
  """
@@ -82,6 +84,8 @@ def generate(
82
  randomize_seed: bool = False,
83
  style_name: str = DEFAULT_STYLE_NAME,
84
  num_inference_steps: int = 30,
 
 
85
  progress=gr.Progress(track_tqdm=True),
86
  ):
87
  positive_prompt, style_negative_prompt = apply_style(style_name, prompt)
@@ -99,6 +103,8 @@ def generate(
99
  seed = int(randomize_seed_fn(seed, randomize_seed))
100
  generator = torch.Generator(device="cuda").manual_seed(seed)
101
 
 
 
102
  images = pipe(
103
  prompt=positive_prompt,
104
  negative_prompt=final_negative_prompt if final_negative_prompt else None,
@@ -106,12 +112,25 @@ def generate(
106
  height=height,
107
  guidance_scale=guidance_scale,
108
  num_inference_steps=num_inference_steps,
109
- num_images_per_prompt=1,
110
  generator=generator,
111
  output_type="pil",
112
  ).images
 
 
 
 
113
  image_paths = [save_image(img) for img in images]
114
- return image_paths, seed
 
 
 
 
 
 
 
 
 
115
 
116
  # Example prompts
117
  examples = [
@@ -148,6 +167,9 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
148
  )
149
  run_button = gr.Button("Run", scale=0, variant="primary")
150
  result = gr.Gallery(label="Result", columns=1, show_label=False, preview=True)
 
 
 
151
 
152
  with gr.Accordion("Advanced options", open=False):
153
  style_selection = gr.Dropdown(
@@ -200,11 +222,19 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
200
  step=1,
201
  value=30,
202
  )
 
 
 
 
 
 
 
 
203
 
204
  gr.Examples(
205
  examples=examples,
206
  inputs=prompt,
207
- outputs=[result, seed],
208
  fn=generate,
209
  cache_examples=False,
210
  )
@@ -235,8 +265,10 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
235
  randomize_seed,
236
  style_selection,
237
  num_inference_steps,
 
 
238
  ],
239
- outputs=[result, seed],
240
  api_name="run",
241
  )
242
 
 
7
  import uuid
8
  from typing import Tuple
9
  import numpy as np
10
+ import time
11
+ import zipfile
12
 
13
  DESCRIPTION = """##
14
  """
 
84
  randomize_seed: bool = False,
85
  style_name: str = DEFAULT_STYLE_NAME,
86
  num_inference_steps: int = 30,
87
+ num_images: int = 1,
88
+ zip_images: bool = False,
89
  progress=gr.Progress(track_tqdm=True),
90
  ):
91
  positive_prompt, style_negative_prompt = apply_style(style_name, prompt)
 
103
  seed = int(randomize_seed_fn(seed, randomize_seed))
104
  generator = torch.Generator(device="cuda").manual_seed(seed)
105
 
106
+ start_time = time.time()
107
+
108
  images = pipe(
109
  prompt=positive_prompt,
110
  negative_prompt=final_negative_prompt if final_negative_prompt else None,
 
112
  height=height,
113
  guidance_scale=guidance_scale,
114
  num_inference_steps=num_inference_steps,
115
+ num_images_per_prompt=num_images,
116
  generator=generator,
117
  output_type="pil",
118
  ).images
119
+
120
+ end_time = time.time()
121
+ duration = end_time - start_time
122
+
123
  image_paths = [save_image(img) for img in images]
124
+
125
+ zip_path = None
126
+ if zip_images:
127
+ zip_name = str(uuid.uuid4()) + ".zip"
128
+ with zipfile.ZipFile(zip_name, 'w') as zipf:
129
+ for i, img_path in enumerate(image_paths):
130
+ zipf.write(img_path, arcname=f"Img_{i}.png")
131
+ zip_path = zip_name
132
+
133
+ return image_paths, seed, f"{duration:.2f}", zip_path
134
 
135
  # Example prompts
136
  examples = [
 
167
  )
168
  run_button = gr.Button("Run", scale=0, variant="primary")
169
  result = gr.Gallery(label="Result", columns=1, show_label=False, preview=True)
170
+ seed_display = gr.Textbox(label="Seed used", interactive=False)
171
+ generation_time = gr.Textbox(label="Generation time (seconds)", interactive=False)
172
+ zip_file = gr.File(label="Download ZIP")
173
 
174
  with gr.Accordion("Advanced options", open=False):
175
  style_selection = gr.Dropdown(
 
222
  step=1,
223
  value=30,
224
  )
225
+ num_images = gr.Slider(
226
+ label="Number of images",
227
+ minimum=1,
228
+ maximum=101,
229
+ step=1,
230
+ value=1,
231
+ )
232
+ zip_images = gr.Checkbox(label="Zip generated images", value=False)
233
 
234
  gr.Examples(
235
  examples=examples,
236
  inputs=prompt,
237
+ outputs=[result, seed_display, generation_time, zip_file],
238
  fn=generate,
239
  cache_examples=False,
240
  )
 
265
  randomize_seed,
266
  style_selection,
267
  num_inference_steps,
268
+ num_images,
269
+ zip_images,
270
  ],
271
+ outputs=[result, seed_display, generation_time, zip_file],
272
  api_name="run",
273
  )
274