from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
from PIL import Image
import torch
from threading import Thread
import gradio as gr
import time
import spaces
import os
from huggingface_hub import login

# Log in for the gated meta-llama checkpoint; the token comes from the Space
# secret named "llama32vision".
login(token=os.getenv('llama32vision'))
ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"

# Load the 11B vision-instruct checkpoint in bfloat16 on the GPU.
model = MllamaForConditionalGeneration.from_pretrained(ckpt, torch_dtype=torch.bfloat16).to("cuda")
processor = AutoProcessor.from_pretrained(ckpt)
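# The processor bundles the tokenizer and image processor; its
# apply_chat_template renders the messages list built below into the model's
# chat prompt format (inserting image placeholder tokens wherever a
# {"type": "image"} entry appears).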
@spaces.GPU
def bot_streaming(message, history, max_new_tokens=250):
    txt = message["text"]
    messages = []
    images = []

    # Rebuild the conversation from Gradio's history (tuples format).
    for i, msg in enumerate(history):
        if isinstance(msg[0], tuple):
            # Image turn: Gradio stores the upload as its own history row, with
            # the accompanying text and reply in the next row, so merge the two.
            messages.append({"role": "user", "content": [{"type": "text", "text": history[i + 1][0]}, {"type": "image"}]})
            messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i + 1][1]}]})
            images.append(Image.open(msg[0][0]).convert("RGB"))
        elif i > 0 and isinstance(history[i - 1][0], tuple) and isinstance(msg[0], str):
            # Text row already merged with the preceding image row above.
            pass
        elif isinstance(msg[0], str):  # text-only turn
            messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
            messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
    # Add the current message (and its image, if one was attached).
    if len(message["files"]) == 1:
        if isinstance(message["files"][0], str):  # cached examples pass a plain path
            image = Image.open(message["files"][0]).convert("RGB")
        else:  # regular uploads pass a dict with a "path" key
            image = Image.open(message["files"][0]["path"]).convert("RGB")
        images.append(image)
        messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
    else:
        messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
    # Render the chat into the model's prompt string, then tokenize (together
    # with the images when any are present).
    texts = processor.apply_chat_template(messages, add_generation_prompt=True)
    if not images:
        inputs = processor(text=texts, return_tensors="pt").to("cuda")
    else:
        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")

    # The processor forwards decode() to its tokenizer, so it can act as the
    # streamer's tokenizer here.
    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
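    # For reference, a blocking (non-streaming) call would look like this sketch:
    # output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # reply = processor.decode(output_ids[0][inputs["input_ids"].shape[-1]:],
    #                          skip_special_tokens=True)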
    # Run generation on a background thread so tokens can be consumed as they arrive.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # Accumulate and yield the partial reply so Gradio streams it to the UI.
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        time.sleep(0.01)
        yield buffer
demo = gr.ChatInterface(
    fn=bot_streaming,
    title="Multimodal Llama 3.2 Vision Instruct 11B, by Meta",
    textbox=gr.MultimodalTextbox(),
    additional_inputs=[
        gr.Slider(
            minimum=10,
            maximum=500,
            value=250,
            step=10,
            label="Maximum number of new tokens to generate",
        )
    ],
    cache_examples=False,
    description="Upload an image and start chatting about it, or just enter any text into the prompt to start.",
    stop_btn="Stop Generation",
    fill_height=True,
    multimodal=True,
)

demo.launch(debug=True)
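# Note: @spaces.GPU targets ZeroGPU Spaces, which attach a GPU per call. To run
# locally (assuming a CUDA GPU with enough memory for the bf16 weights, roughly
# 22 GB for 11B parameters), drop the decorator and start with `python app.py`.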