python -m llava.eval.model_vqa_science \
    --model-path /mnt/petrelfs/zhuchenglin/LLaVA/checkpoints_coco_gen_336px/llava-v1.5-13b-pretrain \
    --model-base lmsys/vicuna-13b-v1.5 \
    --question-file ./playground/data/eval/scienceqa/llava_test_CQM-A.json \
    --image-folder ./playground/data/eval/scienceqa/images/test \
    --answers-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_coco_gen_336px.jsonl \
    --single-pred-prompt \
    --temperature 0 \
    --conv-mode vicuna_v1
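Before scoring, it can help to confirm that the answers file was actually written with one JSON object per line. The snippet below is a minimal sketch, not part of the LLaVA tooling; it only assumes the standard JSONL layout and does not rely on any particular field names.

```python
# Hypothetical sanity check on the generated answers file (not part of the LLaVA repo):
# count the records and show which fields the first one carries.
import json

answers_path = "./playground/data/eval/scienceqa/answers/llava-v1.5-13b_coco_gen_336px.jsonl"

with open(answers_path) as f:
    records = [json.loads(line) for line in f if line.strip()]

print(f"{len(records)} answers generated")
if records:
    print("fields per record:", sorted(records[0].keys()))
```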
python llava/eval/eval_science_qa.py \
    --base-dir ./playground/data/eval/scienceqa \
    --result-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_coco_gen_336px.jsonl \
    --output-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_output_coco_gen_336px.jsonl \
    --output-result ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result_coco_gen_336px.json
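After scoring, the aggregated metrics land in the `--output-result` JSON. A minimal sketch for inspecting it is below; the exact keys are defined by `eval_science_qa.py`, so the snippet prints whatever top-level fields are present rather than assuming specific names.

```python
# Hypothetical helper to peek at the aggregated results file (not part of the LLaVA repo).
# It avoids assuming key names and simply summarizes the top-level fields.
import json

result_path = "./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result_coco_gen_336px.json"

with open(result_path) as f:
    result = json.load(f)

for key, value in result.items():
    # Print scalar summary fields in full, truncate large nested structures.
    if isinstance(value, (int, float, str)):
        print(f"{key}: {value}")
    else:
        print(f"{key}: <{type(value).__name__} with {len(value)} entries>")
```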