#!/usr/bin/env bash
# Evaluate a fine-tuned LLaVA-v1.5-13B checkpoint on the MMBench-CN dev split,
# then convert the raw answers into the MMBench submission format.
#
# Requires: the LLaVA repo on PYTHONPATH (llava.eval.model_vqa_mmbench) and
# the MMBench-CN TSV under playground/data/eval/mmbench_cn/.
set -euo pipefail

SPLIT="mmbench_dev_cn_20231003"
EXPERIMENT="llava-v1.5-13b_llava_raw_200k"
EVAL_DIR="./playground/data/eval/mmbench_cn"
MODEL_PATH="/mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_ft/llava_raw_200k/llava-v1.5-13b-pretrain"

# The answers directory must exist before the model writes its JSONL there.
mkdir -p "${EVAL_DIR}/answers/${SPLIT}"

# Run inference: deterministic decoding (temperature 0), Chinese prompts,
# single-letter prediction format expected by MMBench.
python -m llava.eval.model_vqa_mmbench \
    --model-path "${MODEL_PATH}" \
    --model-base lmsys/vicuna-13b-v1.5 \
    --question-file "${EVAL_DIR}/${SPLIT}.tsv" \
    --answers-file "${EVAL_DIR}/answers/${SPLIT}/${EXPERIMENT}.jsonl" \
    --lang cn \
    --single-pred-prompt \
    --temperature 0 \
    --conv-mode vicuna_v1

mkdir -p "${EVAL_DIR}/answers_upload/${SPLIT}_llava_raw_200k"

# Package the per-question answers into the Excel layout MMBench's
# evaluation server expects for upload.
python scripts/convert_mmbench_for_submission.py \
    --annotation-file "${EVAL_DIR}/${SPLIT}.tsv" \
    --result-dir "${EVAL_DIR}/answers/${SPLIT}" \
    --upload-dir "${EVAL_DIR}/answers_upload/${SPLIT}_llava_raw_200k" \
    --experiment "${EXPERIMENT}"