prithivMLmods committed on
Commit
2ef29f4
·
verified ·
1 Parent(s): f6ce935

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -26
app.py CHANGED
@@ -129,26 +129,27 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
129
  seed = random.randint(0, MAX_SEED)
130
  return seed
131
 
132
- def generate_thinking_html(buffer: str) -> str:
133
  """
134
- Return an HTML snippet with a "Thinking..." label, an animated progress bar,
135
- and the current buffered text.
136
  """
137
- return f'''
138
- <div style="display: flex; align-items: center;">
139
- <span style="margin-right: 10px; font-weight: bold;">Thinking...</span>
140
- <div style="flex: 1; margin-right: 10px; white-space: pre-wrap;">{buffer}</div>
141
- <div style="width: 110px; height: 5px; background: #e0e0e0; position: relative; overflow: hidden;">
142
- <div style="width: 100%; height: 100%; background: #1890ff; animation: progressAnimation 1.5s linear infinite;"></div>
143
- </div>
144
  </div>
145
- <style>
146
- @keyframes progressAnimation {{
147
- 0% {{ transform: translateX(-100%); }}
148
- 100% {{ transform: translateX(100%); }}
149
- }}
150
- </style>
151
- '''
 
 
 
 
152
 
153
  @spaces.GPU(duration=60, enable_queue=True)
154
  def generate_image_fn(
@@ -255,6 +256,7 @@ def generate(
255
  conversation = clean_chat_history(chat_history)
256
  conversation.append({"role": "user", "content": text})
257
 
 
258
  if files:
259
  if len(files) > 1:
260
  images = [load_image(image) for image in files]
@@ -277,14 +279,15 @@ def generate(
277
  thread.start()
278
 
279
  buffer = ""
280
- # Initial yield: progress bar with no text yet.
281
- yield gr.HTML(generate_thinking_html(buffer))
282
  for new_text in streamer:
283
  buffer += new_text
284
  buffer = buffer.replace("<|im_end|>", "")
285
  time.sleep(0.01)
286
- yield gr.HTML(generate_thinking_html(buffer))
287
  else:
 
288
  input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
289
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
290
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
@@ -305,17 +308,17 @@ def generate(
305
  t = Thread(target=model.generate, kwargs=generation_kwargs)
306
  t.start()
307
 
 
 
 
308
  outputs = []
309
- # Initial yield: progress bar with no text yet.
310
- yield gr.HTML(generate_thinking_html(""))
311
  for new_text in streamer:
312
  outputs.append(new_text)
313
- current_text = "".join(outputs)
314
- yield gr.HTML(generate_thinking_html(current_text))
315
 
316
  final_response = "".join(outputs)
317
- # Final update: yield the final response as plain text.
318
- yield final_response
319
 
320
  # If TTS was requested, convert the final response to speech.
321
  if is_tts and voice:
 
129
  seed = random.randint(0, MAX_SEED)
130
  return seed
131
 
132
+ def progress_with_text(text):
133
  """
134
+ Returns an HTML snippet that shows an animated progress bar along with the given text.
 
135
  """
136
+ return f"""
137
+ <div style="display: flex; align-items: center;">
138
+ <span style="margin-right: 10px;">Thinking...</span>
139
+ <div style="width: 110px; height: 5px; background-color: #ddd; overflow: hidden; position: relative; margin-left: 10px;">
140
+ <div style="width: 50%; height: 100%; background-color: #1565c0; animation: loading 1.5s linear infinite;"></div>
 
 
141
  </div>
142
+ </div>
143
+ <div style="margin-top: 10px;">
144
+ {text}
145
+ </div>
146
+ <style>
147
+ @keyframes loading {{
148
+ 0% {{ transform: translateX(-50%); }}
149
+ 100% {{ transform: translateX(100%); }}
150
+ }}
151
+ </style>
152
+ """
153
 
154
  @spaces.GPU(duration=60, enable_queue=True)
155
  def generate_image_fn(
 
256
  conversation = clean_chat_history(chat_history)
257
  conversation.append({"role": "user", "content": text})
258
 
259
+ # For multimodal input with image files
260
  if files:
261
  if len(files) > 1:
262
  images = [load_image(image) for image in files]
 
279
  thread.start()
280
 
281
  buffer = ""
282
+ # Yield the initial animated progress bar with no text yet.
283
+ yield gr.HTML(progress_with_text(""))
284
  for new_text in streamer:
285
  buffer += new_text
286
  buffer = buffer.replace("<|im_end|>", "")
287
  time.sleep(0.01)
288
+ yield gr.HTML(progress_with_text(buffer))
289
  else:
290
+ # For text-only conversation
291
  input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
292
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
293
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
 
308
  t = Thread(target=model.generate, kwargs=generation_kwargs)
309
  t.start()
310
 
311
+ buffer = ""
312
+ # Yield the initial animated progress bar with no text yet.
313
+ yield gr.HTML(progress_with_text(""))
314
  outputs = []
 
 
315
  for new_text in streamer:
316
  outputs.append(new_text)
317
+ buffer = "".join(outputs)
318
+ yield gr.HTML(progress_with_text(buffer))
319
 
320
  final_response = "".join(outputs)
321
+ yield gr.HTML(progress_with_text(final_response))
 
322
 
323
  # If TTS was requested, convert the final response to speech.
324
  if is_tts and voice: