```bash
SPLIT="mmbench_dev_cn_20231003"

# Run MMBench-CN (dev split) inference with the fine-tuned checkpoint;
# temperature 0 gives greedy, reproducible decoding.
python -m llava.eval.model_vqa_mmbench \
    --model-path /mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_ft/coco_raw_200k/llava-v1.5-13b \
    --question-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \
    --answers-file ./playground/data/eval/mmbench_cn/answers/$SPLIT/llava-v1.5-13b_coco_raw_200k_ft_200k.jsonl \
    --lang cn \
    --single-pred-prompt \
    --temperature 0 \
    --conv-mode vicuna_v1
```
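Before converting, it can help to sanity-check the raw answers file. A minimal sketch (the path assumes the `SPLIT` and experiment name used above; these are plain shell commands, not part of the LLaVA scripts):

```bash
# Quick sanity check on the inference output: one JSON record per question.
ANSWERS=./playground/data/eval/mmbench_cn/answers/$SPLIT/llava-v1.5-13b_coco_raw_200k_ft_200k.jsonl
wc -l "$ANSWERS"                            # record count
head -n 1 "$ANSWERS" | python -m json.tool  # inspect the first prediction
```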
```bash
# Convert the raw answers into the upload format expected by the
# MMBench evaluation server, one output per --experiment name.
mkdir -p playground/data/eval/mmbench_cn/answers_upload/${SPLIT}_coco_raw_200k_ft_200k

python scripts/convert_mmbench_for_submission.py \
    --annotation-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \
    --result-dir ./playground/data/eval/mmbench_cn/answers/$SPLIT \
    --upload-dir ./playground/data/eval/mmbench_cn/answers_upload/${SPLIT}_coco_raw_200k_ft_200k \
    --experiment llava-v1.5-13b_coco_raw_200k_ft_200k
```
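The file written to the `answers_upload` directory (named after the `--experiment` argument, an Excel sheet in the upstream LLaVA version of the script) is the artifact to submit to the MMBench evaluation server for scoring.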