Batch Inference会报错
Batch Inference会报错,ValueError: You are attempting to perform batched generation with padding_side='right' this may lead to unexpected behaviour for Flash Attention version of Qwen2_5_VL. Make sure to call tokenizer.padding_side = 'left'
before tokenizing the input.
希望能修复一下
代码如下:
import os
import torch
os.environ["CUDA_VISIBLE_DEVICES"] = "6,7"
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
"/data/guofeng/test/DL/data/model/Qwen2.5-VL/Qwen2.5-VL-7B-Instruct",
torch_dtype = torch.bfloat16,
attn_implementation = "flash_attention_2",
device_map = "auto",
)
default processer
processor = AutoProcessor.from_pretrained("/data/guofeng/test/DL/data/model/Qwen2.5-VL/Qwen2.5-VL-7B-Instruct")
Sample messages for batch inference
messages1 = [
{
"role":"user",
"content":[
{"type":"image", "image":"/data/guofeng/test/image/skis.png"},
{"type":"image", "image":"/data/guofeng/test/image/soccer.jpg"},
{"type":"text", "text":"What are the common elements in these pictures?"},
],
}
]
messages2 = [
{"role":"system", "content":"You are a helpful assistant."},
{"role":"user", "content":"Who are you?"},
]
Combine messages for batch processing
messages = [messages1, messages2]
Preparation for batch inference
texts = [
processor.apply_chat_template(msg, tokenize = False, add_generation_prompt = True)
for msg in messages
]
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
text = texts,
images = image_inputs,
videos = video_inputs,
padding = True,
return_tensors = "pt",
)
inputs = inputs.to("cuda")
Batch Inference
generated_ids = model.generate(**inputs, max_new_tokens = 128)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_texts = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens = True, clean_up_tokenization_spaces = False
)
print(output_texts)
报错如下:
Loading checkpoint shards: 100%|██████████| 5/5 [00:03<00:00, 1.64it/s]
A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set padding_side='left'
when initializing the tokenizer.
Traceback (most recent call last):
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/IPython/core/interactiveshell.py", line 3577, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "", line 1, in
runfile('/data/guofeng/test/draft.py', args=['--gpu_name', '3090-3', '--cnt_parts', '4', '--current_part', '0', '--current_gpu', '0,1,2,3,4,5,6,7'], wdir='/data/guofeng/test/')
File "/data/guofeng/.pycharm_helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/.pycharm_helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/data/guofeng/test/draft.py", line 53, in
generated_ids = model.generate(**inputs, max_new_tokens = 128)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/transformers/generation/utils.py", line 2228, in generate
result = self._sample(
^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/transformers/generation/utils.py", line 3211, in _sample
outputs = self(**model_inputs, return_dict=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/accelerate/hooks.py", line 170, in new_forward
output = module._old_forward(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 1808, in forward
outputs = self.model(
^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 1124, in forward
causal_mask = self._update_causal_mask(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/guofeng/anaconda3/envs/4090-1/lib/python3.11/site-packages/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py", line 1203, in _update_causal_mask
raise ValueError(
ValueError: You are attempting to perform batched generation with padding_side='right' this may lead to unexpected behaviour for Flash Attention version of Qwen2_5_VL. Make sure to call tokenizer.padding_side = 'left'
before tokenizing the input.