#!/bin/bash
# Run MM-Vet evaluation for a fine-tuned LLaVA-v1.5-13B checkpoint:
#   1. Generate answers with llava.eval.model_vqa (greedy decoding, temperature 0).
#   2. Convert the answers JSONL into the JSON format expected by the MM-Vet grader.
#
# Requires: the LLaVA repo on PYTHONPATH, the checkpoint below, and the MM-Vet
# question/image data under ./playground/data/eval/mm-vet.
set -euo pipefail

# Experiment tag — used for both the answers file and the converted results file
# so the two stages cannot drift apart.
readonly EXP_NAME="llava-v1.5-13b_llava_gen_200k_ft_200k"
readonly EVAL_DIR="./playground/data/eval/mm-vet"
readonly CKPT="/mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_ft/llava_gen_200k/llava-v1.5-13b"

# Stage 1: answer generation. temperature 0 → deterministic greedy decoding;
# vicuna_v1 is the conversation template matching the v1.5 checkpoints.
python -m llava.eval.model_vqa \
    --model-path "${CKPT}" \
    --question-file "${EVAL_DIR}/llava-mm-vet.jsonl" \
    --image-folder "${EVAL_DIR}/images" \
    --answers-file "${EVAL_DIR}/answers/${EXP_NAME}.jsonl" \
    --temperature 0 \
    --conv-mode vicuna_v1

# Stage 2: convert JSONL answers to the MM-Vet grader's JSON format.
mkdir -p "${EVAL_DIR}/results"

python scripts/convert_mmvet_for_eval.py \
    --src "${EVAL_DIR}/answers/${EXP_NAME}.jsonl" \
    --dst "${EVAL_DIR}/results/${EXP_NAME}.json"