Update app.py
app.py CHANGED
@@ -174,11 +174,6 @@ def save_image(img: Image.Image) -> str:
     img.save(unique_name)
     return unique_name
 
-# Helper to generate a loading animation component.
-def loading_animation(message="Processing..."):
-    # Using Markdown to display an animated GIF for the loading animation.
-    return gr.Markdown(f" {message}")
-
 @spaces.GPU
 def generate(
     input_dict: dict,
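Aside: `@spaces.GPU` comes from the `spaces` package used on ZeroGPU Spaces; it allocates a GPU only while the decorated function runs. A minimal sketch, assuming a ZeroGPU Space with `torch` installed (the function name is illustrative, not from this app):

```python
import spaces
import torch

@spaces.GPU  # request a ZeroGPU device only for the duration of this call
def gpu_name() -> str:
    # CUDA is guaranteed to be available only inside the decorated function.
    return torch.cuda.get_device_name(0)
```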
@@ -239,12 +234,9 @@ def generate(
         torch.cuda.empty_cache()
 
         selected_pipe = models.get(model_choice, pipe)
-
-        # Yield a beautiful loading animation for image generation.
-        yield loading_animation("Generating image...")
-
         images = selected_pipe(**options).images
         image_path = save_image(images[0])
+        yield "Generating image ..."
         yield gr.Image(image_path)
         return
 
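The removed `loading_animation` helper wrapped the status text in a `gr.Markdown` component; the replacement yields plain strings instead. In a `gr.ChatInterface` generator handler, each successive yield overwrites the pending bot message, so a status string followed by the final component gives the same progressive feedback without a helper. A minimal sketch of that pattern, assuming Gradio 4.x (the handler name and the sleep are illustrative):

```python
import time
import gradio as gr

def respond(message, history):
    # Each yield replaces the previous bot message in the chat window.
    yield "Generating image ..."
    time.sleep(1)  # stand-in for the diffusion pipeline call
    yield gr.Image("output.png")  # final yield: the rendered image

gr.ChatInterface(respond).launch()
```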
@@ -276,14 +268,11 @@ def generate(
         inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True).to("cuda")
         streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
         generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens}
-
-        # Yield a beautiful loading animation for multimodal generation.
-        yield loading_animation("Thinking...")
-
         thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
         thread.start()
 
         buffer = ""
+        yield "Thinking..."
         for new_text in streamer:
             buffer += new_text
             buffer = buffer.replace("<|im_end|>", "")
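This branch keeps the stock `transformers` streaming recipe: `model.generate` blocks until it finishes, so it runs in a background `Thread` while `TextIteratorStreamer` hands decoded text chunks to the main thread. A self-contained sketch of the recipe (the small model name is illustrative, not the one this Space loads):

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")           # illustrative model
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tok("The moon is", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": 32}

# generate() runs in a worker thread; the streamer iterator below
# yields decoded chunks as soon as they are produced.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

buffer = ""
for new_text in streamer:
    buffer += new_text  # accumulate the partial reply, as app.py does
thread.join()
print(buffer)
```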
@@ -307,10 +296,6 @@ def generate(
             "num_beams": 1,
             "repetition_penalty": repetition_penalty,
         }
-
-        # Yield a beautiful loading animation for text generation.
-        yield loading_animation("Thinking...")
-
         t = Thread(target=model.generate, kwargs=generation_kwargs)
         t.start()
 
@@ -342,21 +327,6 @@ h1 {
     background: #1565c0;
     border-radius: 100vh;
 }
-
-/* Loading spinner style (if you prefer a CSS spinner instead) */
-.spinner {
-    margin: 50px auto;
-    width: 50px;
-    height: 50px;
-    border: 6px solid #ddd;
-    border-top: 6px solid #1565c0;
-    border-radius: 50%;
-    animation: spin 1s linear infinite;
-}
-@keyframes spin {
-    0% { transform: rotate(0deg); }
-    100% { transform: rotate(360deg); }
-}
 '''
 
 demo = gr.ChatInterface(
@@ -370,9 +340,9 @@ demo = gr.ChatInterface(
     ],
     examples=[
         ["@tts1 Who is Nikola Tesla, and why did he die?"],
-        ['@lightningv5 Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic'],
-        ['@lightningv4 " serene landscape with mountains'],
-        ['@turbov3 Abstract art, colorful and vibrant'],
+        ['@lightningv5 "Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic"'],
+        ['@lightningv4 "A serene landscape with mountains"'],
+        ['@turbov3 "Abstract art, colorful and vibrant"'],
         ["Write a Python function to check if a number is prime."],
         ["@tts2 What causes rainbows to form?"],
     ],