Sergidev committed
Commit 0388d1b (verified) · Parent: 7117c2e

Create app.py

Files changed (1): app.py (+25 -47)
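Summary: this commit strips the in-memory generation history from the Space. The `image_history` global, the per-image thumbnail bookkeeping inside `generate()`, and the two-column layout with its "Generation History" gallery all go away. `generate()` now returns `(images, metadata)`, and every event handler that previously targeted the `history` component is rewired to `result` (or `[result, gr_metadata]`).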
app.py CHANGED
@@ -38,8 +38,6 @@ torch.backends.cudnn.benchmark = False
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-# Add a new global variable to store the image history
-image_history = []
 
 def load_pipeline(model_name):
     vae = AutoencoderKL.from_pretrained(
@@ -66,6 +64,7 @@ def load_pipeline(model_name):
     pipe.to(device)
     return pipe
 
+
 @spaces.GPU
 def generate(
     prompt: str,
@@ -160,17 +159,7 @@ def generate(
         filepath = utils.save_image(image, metadata, OUTPUT_DIR)
         logger.info(f"Image saved as {filepath} with metadata")
 
-        # Add the generated image and metadata to the history
-        for image in images:
-            thumbnail = image.copy()
-            thumbnail.thumbnail((256, 256))
-            image_history.insert(0, {
-                "image": thumbnail,
-                "prompt": prompt,
-                "metadata": metadata
-            })
-
-        return images, metadata, gr.update(value=image_history)
+        return images, metadata
     except Exception as e:
         logger.exception(f"An error occurred: {e}")
         raise
@@ -180,6 +169,7 @@ def generate(
         pipe.scheduler = backup_scheduler
         utils.free_memory()
 
+
 if torch.cuda.is_available():
     pipe = load_pipeline(MODEL)
     logger.info("Loaded on Device!")
@@ -202,35 +192,24 @@ with gr.Blocks(css="style.css") as demo:
     )
     with gr.Group():
         with gr.Row():
-            with gr.Column(scale=2):
-                prompt = gr.Text(
-                    label="Prompt",
-                    show_label=False,
-                    max_lines=5,
-                    placeholder="Enter your prompt",
-                    container=False,
-                )
-                run_button = gr.Button(
-                    "Generate",
-                    variant="primary",
-                    scale=0
-                )
-                result = gr.Gallery(
-                    label="Result",
-                    columns=1,
-                    preview=True,
-                    show_label=False
-                )
-
-            with gr.Column(scale=1):
-                history = gr.Gallery(
-                    label="Generation History",
-                    show_label=True,
-                    elem_id="history",
-                    columns=2,
-                    height=800,
-                )
-
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=5,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+            run_button = gr.Button(
+                "Generate",
+                variant="primary",
+                scale=0
+            )
+            result = gr.Gallery(
+                label="Result",
+                columns=1,
+                preview=True,
+                show_label=False
+            )
     with gr.Accordion(label="Advanced Settings", open=False):
         negative_prompt = gr.Text(
            label="Negative Prompt",
@@ -311,7 +290,7 @@ with gr.Blocks(css="style.css") as demo:
     gr.Examples(
         examples=config.examples,
         inputs=prompt,
-        outputs=[result, gr_metadata, history],
+        outputs=[result, gr_metadata],
         fn=lambda *args, **kwargs: generate(*args, use_upscaler=True, **kwargs),
         cache_examples=CACHE_EXAMPLES,
     )
@@ -354,7 +333,7 @@ with gr.Blocks(css="style.css") as demo:
     ).then(
         fn=generate,
         inputs=inputs,
-        outputs=[result, gr_metadata, history],
+        outputs=result,
         api_name="run",
     )
     negative_prompt.submit(
@@ -366,7 +345,7 @@ with gr.Blocks(css="style.css") as demo:
     ).then(
         fn=generate,
         inputs=inputs,
-        outputs=[result, gr_metadata, history],
+        outputs=result,
         api_name=False,
     )
     run_button.click(
@@ -378,8 +357,7 @@ with gr.Blocks(css="style.css") as demo:
     ).then(
         fn=generate,
         inputs=inputs,
-        outputs=[result, gr_metadata, history],
+        outputs=[result, gr_metadata],
         api_name=False,
     )
-
 demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
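For reference, the removed code path kept a newest-first list of 256x256 thumbnails alongside each prompt and its metadata. A minimal standalone sketch of that mechanism, reconstructed from the deleted lines (the record_history name is mine, not from app.py; only Pillow is assumed):

from PIL import Image

image_history = []  # newest first, as in the removed global

def record_history(images: list[Image.Image], prompt: str, metadata: dict) -> None:
    # Replicate the removed bookkeeping: one history entry per generated image.
    for image in images:
        thumbnail = image.copy()
        thumbnail.thumbnail((256, 256))  # in-place downscale, aspect ratio preserved
        image_history.insert(0, {
            "image": thumbnail,
            "prompt": prompt,
            "metadata": metadata,
        })

One loose end the diff leaves behind: generate() still returns two values, and run_button.click and gr.Examples consume both via outputs=[result, gr_metadata], but the two .submit handlers wire only outputs=result.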