```bash
python -m llava.eval.model_vqa_science \
    --model-path /mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_ft/llava_coco_gen_758k/llava-v1.5-13b \
    --question-file ./playground/data/eval/scienceqa/llava_test_CQM-A.json \
    --image-folder ./playground/data/eval/scienceqa/images/test \
    --answers-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_llava_coco_gen_758k_ft_200k.jsonl \
    --single-pred-prompt \
    --temperature 0 \
    --conv-mode vicuna_v1
```
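The inference step writes one JSON object per line to the `--answers-file`. As a quick sanity check before scoring, you can peek at a few generated records; this is a minimal sketch, assuming the standard `question_id`/`text` fields that LLaVA's evaluation scripts emit:

```python
import json

# Path matches the --answers-file argument above.
answers_file = "./playground/data/eval/scienceqa/answers/llava-v1.5-13b_llava_coco_gen_758k_ft_200k.jsonl"

with open(answers_file) as f:
    answers = [json.loads(line) for line in f]

print(f"{len(answers)} answers generated")
# Each record is expected to carry (at least) a question id and the predicted text.
for ans in answers[:3]:
    print(ans.get("question_id"), "->", ans.get("text"))
```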
```bash
python llava/eval/eval_science_qa.py \
    --base-dir ./playground/data/eval/scienceqa \
    --result-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_llava_coco_gen_758k_ft_200k.jsonl \
    --output-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_output_llava_coco_gen_758k_ft_200k.jsonl \
    --output-result ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result_llava_coco_gen_758k_ft_200k.json
```
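The scoring step writes per-question judgments to `--output-file` and a summary to `--output-result`. To read back the reported score, a minimal sketch (assuming the summary JSON exposes an overall accuracy field; if the schema differs, the snippet simply lists the available keys):

```python
import json

# Path matches the --output-result argument above.
result_file = "./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result_llava_coco_gen_758k_ft_200k.json"

with open(result_file) as f:
    result = json.load(f)

# Assumed field name; fall back to inspecting the keys if it is absent.
if "acc" in result:
    print(f"ScienceQA accuracy: {result['acc']}")
else:
    print("Result keys:", list(result.keys()))
```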