+ CHECKPOINT=work_dirs/InternVL2-2B
+ DATASET=mmmu-val
++ pwd
+ CHECKPOINT=/mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev/work_dirs/InternVL2-2B
++ pwd
+ export PYTHONPATH=/mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev:/mnt/petrelfs/wangweiyun/workspace_wwy/pkgs/petrel-oss-sdk-2.3.14:/mnt/petrelfs/share_data/wangweiyun/share_pkgs/petrel-oss-sdk-2.3.12:
+ PYTHONPATH=/mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev:/mnt/petrelfs/wangweiyun/workspace_wwy/pkgs/petrel-oss-sdk-2.3.14:/mnt/petrelfs/share_data/wangweiyun/share_pkgs/petrel-oss-sdk-2.3.12:
+ echo 'CHECKPOINT: /mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev/work_dirs/InternVL2-2B'
CHECKPOINT: /mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev/work_dirs/InternVL2-2B
+ MASTER_PORT=63669
+ PORT=63665
+ GPUS=8
+ GPUS_PER_NODE=8
+ NODES=1
+ export MASTER_PORT=63669
+ MASTER_PORT=63669
+ export PORT=63665
+ PORT=63665
+ ARGS=("$@")
+ [[ 5 -gt 0 ]]
+ case "$1" in
+ shift
+ [[ 4 -gt 0 ]]
+ case "$1" in
+ shift
+ [[ 3 -gt 0 ]]
+ case "$1" in
+ shift
+ [[ 2 -gt 0 ]]
+ case "$1" in
+ shift
+ [[ 1 -gt 0 ]]
+ case "$1" in
+ shift
+ [[ 0 -gt 0 ]]
+ echo 'GPUS: 8'
GPUS: 8
+ [[ /mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev/work_dirs/InternVL2-2B == */ ]]
+ '[' mmmu-val == mme ']'
+ '[' mmmu-val == caption ']'
+ '[' mmmu-val == caption-coco ']'
+ '[' mmmu-val == caption-flickr30k ']'
+ '[' mmmu-val == caption-nocaps ']'
+ '[' mmmu-val == vqa ']'
+ '[' mmmu-val == vqa-okvqa-val ']'
+ '[' mmmu-val == vqa-textvqa-val ']'
+ '[' mmmu-val == vqa-textvqa-val-ocr ']'
+ '[' mmmu-val == vqa-vizwiz-val ']'
+ '[' mmmu-val == vqa-vizwiz-test ']'
+ '[' mmmu-val == vqa-vqav2-testdev ']'
+ '[' mmmu-val == vqa-ai2d-test ']'
+ '[' mmmu-val == vqa-vqav2-val ']'
+ '[' mmmu-val == vqa-gqa-testdev ']'
+ '[' mmmu-val == vqa-docvqa-val ']'
+ '[' mmmu-val == vqa-docvqa-test ']'
+ '[' mmmu-val == vqa-chartqa-test ']'
+ '[' mmmu-val == vqa-infovqa-val ']'
+ '[' mmmu-val == vqa-infovqa-test ']'
+ '[' mmmu-val == vqa-chartqa-test-human ']'
+ '[' mmmu-val == vqa-chartqa-test-augmented ']'
+ '[' mmmu-val == vqa-ocrvqa-val ']'
+ '[' mmmu-val == vqa-ocrvqa-test ']'
+ '[' mmmu-val == refcoco ']'
+ '[' mmmu-val == refcoco-val ']'
+ '[' mmmu-val == llava-bench ']'
+ '[' mmmu-val == pope ']'
+ '[' mmmu-val == tiny_lvlm ']'
+ '[' mmmu-val == mmvet ']'
+ '[' mmmu-val == cmmmu ']'
+ '[' mmmu-val == mmbench-dev-en ']'
+ '[' mmmu-val == mmbench-dev-cn ']'
+ '[' mmmu-val == mmbench-test-en ']'
+ '[' mmmu-val == mmbench-test-cn ']'
+ '[' mmmu-val == ccbench-dev ']'
+ '[' mmmu-val == scienceqa ']'
+ '[' mmmu-val == mmmu-dev ']'
+ '[' mmmu-val == mmmu-val ']'
+ torchrun --nnodes=1 --node_rank=0 --master_addr=127.0.0.1 --nproc_per_node=8 --master_port=63669 eval/mmmu/evaluate_mmmu.py --checkpoint /mnt/petrelfs/wangweiyun/workspace_zyc/VLM-Dev/work_dirs/InternVL2-2B --datasets MMMU_validation --dynamic --max-num 6
[2024-08-07 21:56:26,513] torch.distributed.run: [WARNING]
[2024-08-07 21:56:26,513] torch.distributed.run: [WARNING] *****************************************
[2024-08-07 21:56:26,513] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-08-07 21:56:26,513] torch.distributed.run: [WARNING] *****************************************
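The wrapper script ultimately reduces to the plain `torchrun` launch shown in the trace: one worker per GPU, each worker joining a process group via environment variables that `torchrun` sets for it. The sketch below illustrates that usual initialization pattern; the function name and structure are illustrative assumptions, not code taken from `evaluate_mmmu.py`.

```python
# Minimal sketch of how a torchrun worker typically initializes itself.
# torchrun exports RANK, LOCAL_RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT
# (63669 in the trace above) to each of the --nproc_per_node=8 processes.
import os

import torch
import torch.distributed as dist


def init_distributed() -> tuple[int, int]:
    # Join the process group configured by torchrun (init_method defaults
    # to 'env://', i.e. the environment variables above). NCCL is the usual
    # backend for multi-GPU evaluation.
    dist.init_process_group(backend='nccl')
    # Pin this worker to its own GPU so the 8 ranks do not collide.
    torch.cuda.set_device(int(os.environ['LOCAL_RANK']))
    return dist.get_rank(), dist.get_world_size()


if __name__ == '__main__':
    rank, world_size = init_distributed()
    # Each rank would then evaluate its shard of the benchmark.
    print(f'rank {rank}/{world_size} ready')
```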
datasets: ['MMMU_validation']
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[test] total_params: 2.205754368B, use num_beams: 1
[test] image_size: 448
[test] template: internlm2-chat
[test] dynamic_image_size: True
[test] use_thumbnail: True
[test] max_num: 6
[... identical startup output from the remaining 7 ranks omitted ...]
  0%|          | 0/30 [00:00
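The `[test]` block documents the preprocessing in effect for this run: dynamic tiling at 448x448 with at most 6 tiles per image plus a global thumbnail, matching the `--dynamic --max-num 6` flags. Below is a minimal sketch of that tiling scheme, assuming the usual closest-aspect-ratio grid selection; the repository's own preprocessing function may differ in detail.

```python
# Hypothetical simplified version of dynamic tiling as configured above
# (image_size=448, max_num=6, use_thumbnail=True); not the repo's exact code.
from PIL import Image


def dynamic_tile(image: Image.Image, image_size: int = 448,
                 min_num: int = 1, max_num: int = 6,
                 use_thumbnail: bool = True) -> list[Image.Image]:
    w, h = image.size
    aspect = w / h
    # Enumerate tile grids (cols, rows) whose tile count lies in
    # [min_num, max_num] and pick the grid closest to the input aspect ratio.
    grids = [(c, r)
             for c in range(1, max_num + 1)
             for r in range(1, max_num + 1)
             if min_num <= c * r <= max_num]
    cols, rows = min(grids, key=lambda g: abs(g[0] / g[1] - aspect))
    # Resize to the chosen grid, then cut it into image_size x image_size tiles.
    resized = image.resize((cols * image_size, rows * image_size))
    tiles = [resized.crop((c * image_size, r * image_size,
                           (c + 1) * image_size, (r + 1) * image_size))
             for r in range(rows) for c in range(cols)]
    if use_thumbnail and len(tiles) > 1:
        # One extra downscaled view of the whole image as a global thumbnail.
        tiles.append(image.resize((image_size, image_size)))
    return tiles
```

Under this scheme, a wide image might map to a 3x2 grid: six 448x448 tiles plus the thumbnail, i.e. up to seven tiles fed to the vision encoder per sample, which is what bounds per-image cost when `max_num` is 6.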