import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

# Load the model in bfloat16 with SDPA attention; trust_remote_code=True is
# required because MiniCPM-V-2.6 ships its own modeling code.
model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True,
                                  attn_implementation='sdpa', torch_dtype=torch.bfloat16)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True)

# Build a single-turn conversation: the PIL image and the question are passed
# together in the message's `content` list, so `image=None` in the chat call.
image = Image.open('car.jpg').convert('RGB')
question = 'What is in the image?'
msgs = [{'role': 'user', 'content': [image, question]}]

res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer
)
print(res)

# Streaming: with sampling=True and stream=True, model.chat returns a generator
# that yields the answer incrementally as text chunks.
res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    stream=True
)

generated_text = ""
for new_text in res:
    generated_text += new_text
    print(new_text, flush=True, end='')
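
# Multi-turn follow-up (illustrative sketch, not part of the original snippet):
# assuming the chat history is carried in `msgs`, append the assistant's previous
# answer and a new user turn, then call chat again. The follow-up question below
# is a made-up example.
msgs.append({'role': 'assistant', 'content': [generated_text]})
msgs.append({'role': 'user', 'content': ['Describe the car in more detail.']})
followup = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer
)
print(followup)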