```Shell
python -m llava.eval.model_vqa_loader \
    --model-path /mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_ft/llava_gen_200k/llava-v1.5-13b_865k \
    --question-file ./playground/data/eval/MME/llava_mme.jsonl \
    --image-folder ./playground/data/eval/MME/MME_Benchmark_release_version \
    --answers-file ./playground/data/eval/MME/answers/llava-v1.5-13b_llava_gen_200k_ft_865k.jsonl \
    --temperature 0 \
    --conv-mode vicuna_v1
```
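Before converting, it can help to confirm the answers file was written as valid JSONL. A minimal check, using the `--answers-file` path from the command above:

```Shell
# Pretty-print the first generated answer; fails loudly if the line is not valid JSON.
head -n 1 ./playground/data/eval/MME/answers/llava-v1.5-13b_llava_gen_200k_ft_865k.jsonl | python -m json.tool
```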
```Shell
cd ./playground/data/eval/MME
python convert_answer_to_mme.py --experiment llava-v1.5-13b_llava_gen_200k_ft_865k
cd eval_tool
python calculation.py --results_dir answers/llava-v1.5-13b_llava_gen_200k_ft_865k
```
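The experiment name is repeated in four places above, which is easy to get out of sync. A minimal sketch of the same pipeline as one script, assuming it is run from the repository root, with the name and model path factored into variables:

```Shell
#!/usr/bin/env bash
set -e

# Experiment name shared by the answers file and the MME result directory.
EXPERIMENT=llava-v1.5-13b_llava_gen_200k_ft_865k
MODEL_PATH=/mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_ft/llava_gen_200k/llava-v1.5-13b_865k

# 1. Generate answers with greedy decoding (temperature 0).
python -m llava.eval.model_vqa_loader \
    --model-path "$MODEL_PATH" \
    --question-file ./playground/data/eval/MME/llava_mme.jsonl \
    --image-folder ./playground/data/eval/MME/MME_Benchmark_release_version \
    --answers-file ./playground/data/eval/MME/answers/"$EXPERIMENT".jsonl \
    --temperature 0 \
    --conv-mode vicuna_v1

# 2. Convert the answers into MME's per-task result files.
cd ./playground/data/eval/MME
python convert_answer_to_mme.py --experiment "$EXPERIMENT"

# 3. Compute the MME perception and cognition scores.
cd eval_tool
python calculation.py --results_dir answers/"$EXPERIMENT"
```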