prithivMLmods committed on
Commit
300607c
·
verified ·
1 Parent(s): 905e633

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -2
app.py CHANGED
@@ -174,6 +174,11 @@ def save_image(img: Image.Image) -> str:
174
  img.save(unique_name)
175
  return unique_name
176
 
 
 
 
 
 
177
  @spaces.GPU
178
  def generate(
179
  input_dict: dict,
@@ -234,9 +239,12 @@ def generate(
234
  torch.cuda.empty_cache()
235
 
236
  selected_pipe = models.get(model_choice, pipe)
 
 
 
 
237
  images = selected_pipe(**options).images
238
  image_path = save_image(images[0])
239
- yield "Generating image..."
240
  yield gr.Image(image_path)
241
  return
242
 
@@ -268,11 +276,14 @@ def generate(
268
  inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True).to("cuda")
269
  streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
270
  generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens}
 
 
 
 
271
  thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
272
  thread.start()
273
 
274
  buffer = ""
275
- yield "Thinking..."
276
  for new_text in streamer:
277
  buffer += new_text
278
  buffer = buffer.replace("<|im_end|>", "")
@@ -296,6 +307,10 @@ def generate(
296
  "num_beams": 1,
297
  "repetition_penalty": repetition_penalty,
298
  }
 
 
 
 
299
  t = Thread(target=model.generate, kwargs=generation_kwargs)
300
  t.start()
301
 
@@ -327,6 +342,21 @@ h1 {
327
  background: #1565c0;
328
  border-radius: 100vh;
329
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330
  '''
331
 
332
  demo = gr.ChatInterface(
 
174
  img.save(unique_name)
175
  return unique_name
176
 
177
+ # Helper: builds a gr.Markdown component that shows a loading GIF followed by a status message.
178
+ def loading_animation(message="Processing..."):
179
+ # Embeds an animated GIF via Markdown image syntax. NOTE(review): depends on the external Giphy URL remaining reachable; the .spinner CSS below is an offline alternative.
180
+ return gr.Markdown(f"![Loading](https://media.giphy.com/media/sSgvbe1m3n93G/giphy.gif) {message}")
181
+
182
  @spaces.GPU
183
  def generate(
184
  input_dict: dict,
 
239
  torch.cuda.empty_cache()
240
 
241
  selected_pipe = models.get(model_choice, pipe)
242
+
243
+ # Yield a beautiful loading animation for image generation.
244
+ yield loading_animation("Generating image...")
245
+
246
  images = selected_pipe(**options).images
247
  image_path = save_image(images[0])
 
248
  yield gr.Image(image_path)
249
  return
250
 
 
276
  inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True).to("cuda")
277
  streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
278
  generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens}
279
+
280
+ # Yield a beautiful loading animation for multimodal generation.
281
+ yield loading_animation("Thinking...")
282
+
283
  thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
284
  thread.start()
285
 
286
  buffer = ""
 
287
  for new_text in streamer:
288
  buffer += new_text
289
  buffer = buffer.replace("<|im_end|>", "")
 
307
  "num_beams": 1,
308
  "repetition_penalty": repetition_penalty,
309
  }
310
+
311
+ # Yield a beautiful loading animation for text generation.
312
+ yield loading_animation("Thinking...")
313
+
314
  t = Thread(target=model.generate, kwargs=generation_kwargs)
315
  t.start()
316
 
 
342
  background: #1565c0;
343
  border-radius: 100vh;
344
  }
345
+
346
+ /* Loading spinner style (if you prefer a CSS spinner instead) */
347
+ .spinner {
348
+ margin: 50px auto;
349
+ width: 50px;
350
+ height: 50px;
351
+ border: 6px solid #ddd;
352
+ border-top: 6px solid #1565c0;
353
+ border-radius: 50%;
354
+ animation: spin 1s linear infinite;
355
+ }
356
+ @keyframes spin {
357
+ 0% { transform: rotate(0deg); }
358
+ 100% { transform: rotate(360deg); }
359
+ }
360
  '''
361
 
362
  demo = gr.ChatInterface(