Update app.py
app.py CHANGED
@@ -55,13 +55,20 @@ def bot_streaming(message, history, max_new_tokens=250):
     else:
         inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
 
-
-
-    with torch.no_grad():
-        output = model.generate(**inputs, **generation_kwargs)
-        output_text = processor.decode(output[0][inputs['input_ids'].shape[1]:]).replace('<|eot_id|>', '')
+    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
+    generated_text = ""
+
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    buffer = ""
+
+    for new_text in streamer:
+        buffer += new_text
+        generated_text_without_prompt = buffer
+        time.sleep(0.01)
+        yield buffer
 
-
 
 
 demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA-CoT",
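The commit swaps the blocking model.generate + processor.decode path for token-level streaming: model.generate runs on a background Thread, a TextIteratorStreamer hands back decoded text as it is produced, and the handler yields the growing buffer so gr.ChatInterface can update the reply incrementally. Below is a minimal sketch of the same pattern; "gpt2" and the prompt are stand-ins (not the space's LLaVA-CoT checkpoint) so it runs on CPU without the multimodal processor.

# A minimal sketch of the threaded-streaming pattern from the commit,
# shown with a small text-only model so it is self-contained.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Streaming generation works by", return_tensors="pt")

# skip_prompt=True keeps the prompt tokens out of the stream;
# skip_special_tokens=True strips markers such as <|eot_id|>.
streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True, skip_prompt=True)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=50)

# generate() blocks until decoding finishes, so it runs on a worker
# thread while this thread consumes decoded chunks as they arrive.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

buffer = ""
for new_text in streamer:  # blocks until the next decoded chunk is ready
    buffer += new_text
    print(buffer, flush=True)  # the Gradio handler does `yield buffer` here
thread.join()

In the space itself, each `yield buffer` is rendered by gr.ChatInterface as a progressively updated reply, and the time.sleep(0.01) presumably just paces UI updates. Note that generated_text and generated_text_without_prompt are assigned but never read in the new code.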