KingNish committed · verified
Commit 5d0d6a4 · Parent(s): e901891

Update app.py

Files changed (1):
  1. app.py +1 -50
app.py CHANGED
@@ -16,7 +16,6 @@ DESCRIPTION = """ # Instant Image
 ### Super fast text to Image Generator.
 ### <span style='color: red;'>You may change the steps from 4 to 8, if you didn't get satisfied results.
 ### First Image processing takes time then images generate faster.
-### Must Try -> Instant Video https://huggingface.co/spaces/KingNish/Instant-Video
 """
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
@@ -111,11 +110,6 @@ if torch.cuda.is_available():
     pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
     print("Model Compiled!")
 
-
-
-
-
-
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name)
@@ -209,14 +203,6 @@ with gr.Blocks(css=css) as demo:
             placeholder="Enter a negative prompt",
             visible=True,
         )
-
-        # num_imgs = gr.Slider(
-        #     label="Num Images",
-        #     minimum=1,
-        #     maximum=8,
-        #     step=1,
-        #     value=1,
-        # )
         style_selection = gr.Radio(
             show_label=True,
             container=True,
@@ -264,26 +250,6 @@ with gr.Blocks(css=css) as demo:
         fn=generate,
         cache_examples=CACHE_EXAMPLES,
     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt,
@@ -315,19 +281,4 @@ with gr.Blocks(css=css) as demo:
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
-    # demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=11900, debug=True)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    demo.queue(max_size=20).launch()
 
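
For reference, a minimal, self-contained sketch of the queue-and-launch pattern this commit keeps at the end of app.py. The echo() function and the tiny UI below are hypothetical placeholders, not the Space's actual pipeline code:

import gradio as gr

# Hypothetical stand-in for the Space's real generate() pipeline.
def echo(prompt: str) -> str:
    return f"Prompt received: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    result = gr.Text(label="Result")
    prompt.submit(fn=echo, inputs=prompt, outputs=result)

if __name__ == "__main__":
    # queue(max_size=20) limits how many requests may wait in the queue;
    # launch() starts the server on Gradio's defaults, which is what the
    # Space relies on after this commit.
    demo.queue(max_size=20).launch()
    # The removed comment showed an explicit host/port variant instead:
    # demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=11900, debug=True)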