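"""Benchmark runner for the JEE/NEET LLM benchmark.

Loads the image-based question dataset, queries the selected OpenRouter model for
each question (with one re-prompt on parse failures and a retry pass for API
failures), scores answers with exam-style marking, and writes incremental JSONL
results plus a Markdown summary for each run.
"""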
import argparse
import datetime
import json
import logging
import os
from typing import Any, Dict, List, Set, Tuple

import yaml
from datasets import load_dataset, Image as HFImage
from PIL import Image as PILImage
from tqdm import tqdm

from evaluation import calculate_accuracy, calculate_exam_scores, calculate_single_question_score_details
from llm_interface import get_openrouter_prediction
from utils import load_api_key

GREEN = '\033[92m'
RED = '\033[91m'
YELLOW = '\033[93m'
CYAN = '\033[96m'
MAGENTA = '\033[95m'
RESET = '\033[0m'

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

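# Keys read from the YAML config by this script. The values below are an
# illustrative assumption, not the actual repository config:
#
#   openrouter_models:            # model identifiers offered via --model
#     - "some-provider/some-model"
#   results_base_dir: "results"   # base directory for run outputs
#   dataset_path: "."             # passed to datasets.load_dataset
#   max_tokens: 100
#   request_timeout: 60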
def get_available_models(config_path: str) -> List[str]:
    """Loads models from the benchmark configuration YAML file."""
    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
        models = config.get("openrouter_models", [])
        if not models:
            logging.warning(f"No models found in {config_path} under 'openrouter_models'.")
        return models
    except FileNotFoundError:
        logging.error(f"Configuration file not found at {config_path} for model retrieval.")
        return []
    except yaml.YAMLError as e:
        logging.error(f"Error parsing configuration file {config_path} for model retrieval: {e}")
        return []
    except Exception as e:
        logging.error(f"Unexpected error retrieving models from {config_path}: {e}")
        return []

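# data/metadata.jsonl holds one JSON object per question. Illustrative (assumed)
# shape, based only on the fields this script accesses:
#   {"question_id": "...", "subject": "...", "exam_name": "...", "exam_year": 2024,
#    "question_type": "MCQ_SINGLE_CORRECT", "image": "images/<file>.png",
#    "correct_answer": "[...]"}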
def get_available_exam_details(metadata_path: str) -> Tuple[List[str], List[str]]:
    """Reads metadata.jsonl to get unique exam names and years."""
    exam_names: Set[str] = set()
    exam_years: Set[str] = set()
    try:
        with open(metadata_path, 'r') as f:
            for line in f:
                try:
                    data = json.loads(line)
                    if 'exam_name' in data:
                        exam_names.add(data['exam_name'])
                    if 'exam_year' in data:
                        exam_years.add(str(data['exam_year']))
                except json.JSONDecodeError:
                    logging.warning(f"Skipping malformed JSON line in {metadata_path}: {line.strip()}")

        sorted_exam_names = sorted(exam_names)
        sorted_exam_years = sorted(exam_years)

        if not sorted_exam_names:
            logging.warning(f"No exam names found in {metadata_path}.")
        if not sorted_exam_years:
            logging.warning(f"No exam years found in {metadata_path}.")

        return sorted_exam_names, sorted_exam_years
    except FileNotFoundError:
        logging.error(f"Metadata file not found at {metadata_path}.")
        return [], []
    except Exception as e:
        logging.error(f"Unexpected error reading or parsing {metadata_path}: {e}")
        return [], []

def load_config(config_path: str) -> dict:
    """Loads the benchmark configuration from a YAML file."""
    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
        logging.info(f"Configuration loaded from {config_path}")
        return config
    except FileNotFoundError:
        logging.error(f"Configuration file not found at {config_path}")
        raise
    except yaml.YAMLError as e:
        logging.error(f"Error parsing configuration file {config_path}: {e}")
        raise

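# Incremental writers: predictions.jsonl stores the raw model interaction per
# question, while summary.jsonl stores the per-question evaluation outcome, so a
# partially completed run still leaves usable artifacts on disk.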
def append_prediction(result: Dict[str, Any], filepath: str):
    """Appends a single prediction result to a JSONL file."""
    # Drop evaluation fields; those are written separately via append_summary_detail.
    prediction_data = result.copy()
    prediction_data.pop('marks_awarded', None)
    prediction_data.pop('evaluation_status', None)
    prediction_data.pop('predicted_answer', None)
    prediction_data.pop('ground_truth', None)
    try:
        with open(filepath, 'a') as f:
            json.dump(prediction_data, f)
            f.write('\n')
    except IOError as e:
        logging.error(f"Failed to append prediction to {filepath}: {e}")
    except Exception as e:
        logging.error(f"Unexpected error appending prediction to {filepath}: {e}")

def append_summary_detail(result_detail: Dict[str, Any], filepath: str):
    """Appends a single question's summary details (evaluation status, marks, predicted, truth) to a JSONL file."""
    try:
        with open(filepath, 'a') as f:
            json.dump(result_detail, f)
            f.write('\n')
    except IOError as e:
        logging.error(f"Failed to append summary detail to {filepath}: {e}")
    except Exception as e:
        logging.error(f"Unexpected error appending summary detail to {filepath}: {e}")

def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
    """Generates a human-readable Markdown summary from the results dictionary."""
    try:
        md_content = []
        model_name = summary.get("model_name", "N/A")
        exam_name = summary.get("exam_name", "N/A")
        exam_year = summary.get("exam_year", "N/A")
        timestamp = summary.get("timestamp", "N/A")
        total_questions_in_dataset = summary.get("total_questions_in_dataset", 0)
        total_questions_processed_in_run = summary.get("total_questions_processed_in_run", 0)

        filtered_questions_count = 0
        if total_questions_in_dataset > 0 and total_questions_processed_in_run > 0:
            filtered_questions_count = total_questions_in_dataset - total_questions_processed_in_run

        md_content.append(f"# Benchmark Results: {model_name}")
        if exam_name and exam_name not in ["N/A", "All_Exams"]:
            md_content.append(f"**Exam Name:** {exam_name}")
        if exam_year and exam_year not in ["N/A", "All_Years"]:
            md_content.append(f"**Exam Year:** {exam_year}")
        md_content.append(f"**Timestamp:** {timestamp}")
        md_content.append(f"**Total Questions in Dataset:** {total_questions_in_dataset if total_questions_in_dataset > 0 else 'N/A'}")
        if filtered_questions_count > 0:
            md_content.append(f"**Questions Filtered Out:** {filtered_questions_count}")
        md_content.append(f"**Total Questions Processed in this Run:** {total_questions_processed_in_run}")

        md_content.append("\n---\n")

        if "overall_score" in summary and "section_breakdown" in summary:
            total_processed = summary.get("total_questions_processed", 0)

            overall_score = summary.get('overall_score', 'N/A')
            total_possible_score = summary.get('total_possible_score_for_processed_questions', 'N/A')
            correct_full_count = summary.get('overall_correct_full', 'N/A')
            partial_correct_count = summary.get('overall_partial_correct', 'N/A')
            incorrect_choice_count = summary.get('overall_incorrect_choice', 'N/A')
            skipped_count = summary.get('overall_skipped', 'N/A')
            failures_count = summary.get('overall_api_parse_failures', 'N/A')
            unmapped_count = summary.get('unmapped_section_questions', 'N/A')

            md_content.append("## Exam Scoring Results")
            md_content.append(f"**Overall Score:** **{overall_score}** / **{total_possible_score}**")
            md_content.append(f"- **Fully Correct Answers:** {correct_full_count}")
            if partial_correct_count != 'N/A' and partial_correct_count > 0:
                md_content.append(f"- **Partially Correct Answers:** {partial_correct_count}")
            md_content.append(f"- **Incorrectly Answered (Choice Made):** {incorrect_choice_count}")
            md_content.append(f"- **Skipped Questions:** {skipped_count}")
            md_content.append(f"- **API/Parse Failures:** {failures_count}")
            md_content.append(f"- **Total Questions Processed:** {total_processed}")
            # unmapped_count may be the string 'N/A'; guard before the numeric comparison.
            if unmapped_count != 'N/A' and unmapped_count > 0:
                md_content.append(f"- **Unmapped Section Questions:** {unmapped_count} *(Not included in section breakdown)*")

            md_content.append("\n### Detailed Score Calculation by Question Type")
            question_type_breakdown = summary.get("question_type_breakdown", {})
            if question_type_breakdown:
                sorted_q_types = sorted(question_type_breakdown.keys())
                for q_type in sorted_q_types:
                    stats = question_type_breakdown[q_type]
                    q_type_display = q_type.replace('_', ' ').title()
                    max_score_per_q = stats.get('max_score_per_question', 0)

                    correct_count_q = stats.get('correct_full', 0)
                    partial_count_q = stats.get('partial_correct', 0)
                    incorrect_count_q = stats.get('incorrect_choice', 0)
                    skipped_count_q = stats.get('skipped', 0)
                    api_fail_count_q = stats.get('api_parse_failures', 0)
                    score_q = stats.get('score', 0)

                    calculation_parts = []
                    if correct_count_q > 0:
                        calculation_parts.append(f"{correct_count_q} Correct (+{max_score_per_q})")
                    if partial_count_q > 0:
                        calculation_parts.append(f"{partial_count_q} Partial")
                    if incorrect_count_q > 0:
                        penalty_per_incorrect = 0
                        if q_type == "MCQ_SINGLE_CORRECT":
                            penalty_per_incorrect = -1
                        elif q_type == "MCQ_MULTIPLE_CORRECT":
                            penalty_per_incorrect = -2
                        calculation_parts.append(f"{incorrect_count_q} Incorrect ({penalty_per_incorrect})")
                    if skipped_count_q > 0:
                        calculation_parts.append(f"{skipped_count_q} Skipped (0)")
                    if api_fail_count_q > 0:
                        penalty_per_api_fail = -1
                        if q_type == "INTEGER":
                            penalty_per_api_fail = 0
                        calculation_parts.append(f"{api_fail_count_q} API/Parse Fail ({penalty_per_api_fail})")

                    calculation_str = " + ".join(part for part in calculation_parts if part)
                    if not calculation_str:
                        calculation_str = "No questions of this type processed or all had 0 score change."

                    md_content.append(f"**{q_type_display} ({stats.get('count', 0)} questions):** {score_q} marks")
                    md_content.append(f" *Calculation:* {calculation_str} = {score_q}")
            else:
                md_content.append("No question type breakdown available.")

            md_content.append("\n### Section Breakdown")
            md_content.append("| Section | Score | Fully Correct | Partially Correct | Incorrect Choice | Skipped | API/Parse Failures |")
            md_content.append("|---------------|-------|---------------|-------------------|------------------|---------|--------------------|")
            section_breakdown = summary.get("section_breakdown", {})

            sorted_section_names = sorted(section_breakdown.keys())
            if not sorted_section_names and section_breakdown:
                logging.warning("Could not sort section names for Markdown summary; using unsorted.")
                sorted_section_names = list(section_breakdown.keys())

            for section_name in sorted_section_names:
                stats = section_breakdown.get(section_name, {})
                score = stats.get('score', 'N/A')
                s_correct = stats.get('correct', 'N/A')
                s_partial = stats.get('partial_correct', 'N/A')
                s_incorrect = stats.get('incorrect', 'N/A')
                s_skipped = stats.get('skipped', 'N/A')
                s_failures = stats.get('api_parse_failures', 'N/A')
                display_section_name = section_name.replace('_', ' ')
                md_content.append(f"| {display_section_name:<13} | {score:<5} | {s_correct:<13} | {s_partial:<17} | {s_incorrect:<16} | {s_skipped:<7} | {s_failures:<18} |")
            if not sorted_section_names:
                md_content.append("| No section data available | N/A | N/A | N/A | N/A | N/A | N/A |")

        elif "accuracy_on_parsed" in summary:
            md_content.append("## Simple Accuracy Results (Fallback)")
            md_content.append(f"- **Accuracy (on successfully parsed non-skipped):** {summary.get('accuracy_on_parsed', 'N/A'):.4f}")
            md_content.append(f"- **Total Processed Attempts:** {summary.get('total_processed_attempts', 'N/A')}")

        else:
            md_content.append("## Summary")
            md_content.append("*(No specific Exam Scoring or Accuracy metrics found in summary)*")

        with open(filepath, 'w') as f:
            f.write("\n".join(md_content))
        logging.info(f"Markdown summary saved to {filepath}")

    except IOError as e:
        logging.error(f"Failed to save markdown summary to {filepath}: {e}")
    except Exception as e:
        logging.error(f"Unexpected error generating or saving markdown summary to {filepath}: {e}")

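# High-level flow of run_benchmark:
#   1. Load the dataset and apply optional exam-name / exam-year / question-id filters.
#   2. Initial pass: one API call per question; if the call succeeds but the answer
#      cannot be parsed, re-prompt once with the previous raw response (attempt 2).
#   3. API retry pass: questions whose initial call raised an exception get one more
#      API attempt, again with a re-prompt on parse failure (up to attempt 3).
#   4. Aggregate exam-style scores and write predictions.jsonl, summary.jsonl and summary.md.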
def run_benchmark(
    config: dict,
    api_key: str,
    model_to_run: str,
    output_dir_override: str | None = None,
    exam_name_choice: str | None = None,
    exam_year_choice: str | None = None,
    question_ids_str: str | None = None
):
    """Runs the benchmark evaluation loop with incremental saving and retries."""

    models_to_run = [model_to_run]
    logging.info(f"Target model for this run: {model_to_run}")

    base_output_dir = output_dir_override if output_dir_override else config.get("results_base_dir", "results")
    os.makedirs(base_output_dir, exist_ok=True)

    dataset_path = config.get("dataset_path", ".")
    try:
        # Load the 'test' split via the repository's dataset loading script and
        # decode the question images for direct use with PIL.
        dataset = load_dataset(
            dataset_path,
            split='test',
            data_files={'test': 'data/metadata.jsonl'},
            data_dir=os.getcwd(),
            trust_remote_code=True,
            download_mode="force_redownload"
        )
        dataset = dataset.cast_column("image", HFImage(decode=True))
        logging.info(f"Dataset loaded successfully from path: {dataset_path}. Original number of questions: {len(dataset)}")
    except Exception as e:
        logging.error(f"Failed to load dataset from path '{dataset_path}': {e}")
        logging.error("Ensure the path is correct and 'jee_neet_benchmark_dataset.py' exists.")
        return

    original_dataset_size = len(dataset)

    # Optional filter: exam name.
    if exam_name_choice and exam_name_choice.lower() != "all":
        logging.info(f"Filtering dataset for exam_name: '{exam_name_choice}'")
        dataset = dataset.filter(lambda example: example.get('exam_name') == exam_name_choice)
        logging.info(f"Dataset size after exam_name filter: {len(dataset)} questions.")

    # Optional filter: exam year (stored as an integer in the dataset).
    if exam_year_choice and exam_year_choice.lower() != "all":
        try:
            filter_year_int = int(exam_year_choice)
            logging.info(f"Filtering dataset for exam_year: {filter_year_int}")
            dataset = dataset.filter(lambda example: example.get('exam_year') == filter_year_int)
            logging.info(f"Dataset size after exam_year filter: {len(dataset)} questions.")
        except ValueError:
            logging.error(f"Invalid exam_year provided: '{exam_year_choice}'. Must be an integer or 'all'. Year filtering skipped.")

    # Optional filter: explicit comma-separated question IDs.
    if question_ids_str:
        try:
            target_question_ids = {q_id.strip() for q_id in question_ids_str.split(',') if q_id.strip()}
            if target_question_ids:
                logging.info(f"Filtering dataset for specific question IDs: {target_question_ids}")
                dataset = dataset.filter(lambda example: example.get('question_id') in target_question_ids)
                logging.info(f"Dataset size after question_id filter: {len(dataset)} questions.")
            else:
                logging.warning("Empty or invalid question_ids string provided. No question ID filtering applied.")
        except Exception as e:
            logging.error(f"Error processing question_ids_str '{question_ids_str}': {e}. No question ID filtering applied.")

    if len(dataset) < original_dataset_size:
        logging.info(f"Final dataset size after all filters: {len(dataset)} (originally {original_dataset_size}).")

    if len(dataset) == 0:
        logging.warning("No questions to process after filtering. Skipping model benchmark.")
        return

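    # Each run writes into its own directory; the layout implied by the path
    # construction below is roughly:
    #   <base_output_dir>/<model>[_<exam>][_<year>]_<timestamp>/
    #       predictions.jsonl   summary.jsonl   summary.md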
    for model_id in models_to_run:

        current_total_questions = len(dataset)
        logging.info(f"--- Starting benchmark for model: {model_id} (Processing {current_total_questions} questions) ---")

        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        safe_model_name = model_id.replace('/', '_')
        dir_name_parts = [safe_model_name]

        current_exam_name_for_dir = exam_name_choice if exam_name_choice and exam_name_choice.lower() != "all" else "AllExams"
        current_exam_year_for_dir = exam_year_choice if exam_year_choice and exam_year_choice.lower() != "all" else "AllYears"

        if current_exam_name_for_dir != "AllExams":
            dir_name_parts.append(current_exam_name_for_dir.replace('/', '_'))
        if current_exam_year_for_dir != "AllYears":
            dir_name_parts.append(str(current_exam_year_for_dir))

        dir_name_parts.append(timestamp)

        model_output_dir_name = "_".join(filter(None, dir_name_parts))
        model_output_dir = os.path.join(base_output_dir, model_output_dir_name)
        os.makedirs(model_output_dir, exist_ok=True)
        predictions_path = os.path.join(model_output_dir, "predictions.jsonl")
        summary_details_path = os.path.join(model_output_dir, "summary.jsonl")
        markdown_summary_path = os.path.join(model_output_dir, "summary.md")
        logging.info(f"Results for {model_id} will be saved to: {model_output_dir}")

        model_results = []
        failed_questions_data = []

        initial_correct_count = 0
        initial_incorrect_count = 0
        initial_skipped_count = 0
        initial_parse_fail_count = 0
        initial_api_fail_count = 0

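        # Initial pass: query the model once per question. Parse failures trigger a
        # single re-prompt; exceptions (API failures) are queued for the retry pass.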
        pbar_initial = tqdm(dataset, desc=f"Processing {model_id} (Initial Pass)", total=current_total_questions)
        for example in pbar_initial:
            question_id = example["question_id"]
            subject = example["subject"]
            exam_name_from_data = example.get("exam_name", "UNKNOWN_EXAM")
            question_type_from_data = example.get("question_type", "MCQ_SINGLE_CORRECT")
            image: PILImage.Image = example["image"]
            truth = json.loads(example["correct_answer"])

            result_data = {
                "question_id": question_id,
                "subject": subject,
                "exam_name": exam_name_from_data,
                "question_type": question_type_from_data,
                "ground_truth": truth,
                "predicted_answer": None,
                "raw_response": None,
                "parse_successful": False,
                "api_call_successful": False,
                "error": None,
                "attempt": 1,
                # Populated only if a re-prompt is needed after a parse failure.
                "previous_raw_response_on_reprompt": None
            }

            try:
                logging.info(f"Attempting API call for question: {question_id} with model: {model_id}")

                parsed_answer, raw_response = get_openrouter_prediction(
                    model_identifier=model_id,
                    api_key=api_key,
                    image=image,
                    exam_name=exam_name_from_data,
                    exam_year=str(example.get("exam_year", "UNKNOWN_YEAR")),
                    question_type=question_type_from_data,
                    max_tokens=config.get("max_tokens", 100),
                    request_timeout=config.get("request_timeout", 60)
                )

                api_success_attempt1 = True
                parse_success_attempt1 = parsed_answer is not None
                raw_response_attempt1 = raw_response

                # API call succeeded but the answer could not be parsed:
                # re-prompt once with the previous raw response.
                if api_success_attempt1 and not parse_success_attempt1 and raw_response_attempt1 is not None:
                    logging.warning(f"Question {question_id}: Initial parse failed. Attempting re-prompt.")
                    result_data["previous_raw_response_on_reprompt"] = raw_response_attempt1
                    try:
                        parsed_answer_rp, raw_response_rp = get_openrouter_prediction(
                            model_identifier=model_id,
                            api_key=api_key,
                            previous_raw_response=raw_response_attempt1,
                            question_type=question_type_from_data,
                            max_tokens=config.get("max_tokens", 100),
                            request_timeout=config.get("request_timeout", 60)
                        )

                        if isinstance(parsed_answer_rp, list):
                            processed_answer_rp = [str(item) for item in parsed_answer_rp]
                        else:
                            processed_answer_rp = parsed_answer_rp

                        result_data.update({
                            "predicted_answer": processed_answer_rp,
                            "raw_response": raw_response_rp,
                            "parse_successful": processed_answer_rp is not None,
                            "api_call_successful": True,
                            "attempt": 2
                        })

                        logging.info(f"Question {question_id}: Re-prompt {'succeeded' if result_data['parse_successful'] else 'failed to parse'}.")
                    except Exception as e_rp:
                        logging.error(f"Re-prompt API call failed for question {question_id}: {e_rp}")
                        result_data.update({
                            "predicted_answer": None,
                            "raw_response": raw_response_attempt1,
                            "parse_successful": False,
                            "api_call_successful": True,
                            "error": f"Initial parse failed. Re-prompt API call failed: {str(e_rp)}",
                            "attempt": 1
                        })
                else:
                    # Either the parse succeeded, or there is no raw response to re-prompt with.
                    current_error = result_data.get("error")
                    api_actually_successful = api_success_attempt1
                    if api_success_attempt1 and raw_response_attempt1 is None and parsed_answer is None:
                        current_error = "Initial API call returned empty content. Re-prompt skipped."

                    if isinstance(parsed_answer, list):
                        processed_initial_answer = [str(item) for item in parsed_answer]
                    else:
                        processed_initial_answer = parsed_answer

                    result_data.update({
                        "predicted_answer": processed_initial_answer,
                        "raw_response": raw_response_attempt1,
                        "parse_successful": parse_success_attempt1,
                        "api_call_successful": api_actually_successful,
                        "error": current_error,
                        "attempt": 1
                    })

                # Score the question immediately and persist results incrementally.
                score_details = calculate_single_question_score_details(result_data)
                result_data['marks_awarded'] = score_details.get('marks_awarded')
                result_data['evaluation_status'] = score_details.get('evaluation_status')

                summary_detail_data = {
                    "question_id": question_id,
                    "marks_awarded": result_data['marks_awarded'],
                    "evaluation_status": result_data['evaluation_status'],
                    "predicted_answer": result_data['predicted_answer'],
                    "ground_truth": result_data['ground_truth'],
                    "attempt": result_data['attempt']
                }
                append_summary_detail(summary_detail_data, summary_details_path)

                model_results.append(result_data)
                append_prediction(result_data, predictions_path)

                final_parsed_answer = result_data["predicted_answer"]
                log_message_prefix = f"Question {question_id}:"
                log_message_suffix = f"(Attempt {result_data['attempt']})"

                if not result_data["api_call_successful"]:
                    initial_api_fail_count += 1
                    logging.info(f"{MAGENTA}{log_message_prefix} API Call Failed {log_message_suffix}{RESET}")
                elif not result_data["parse_successful"]:
                    initial_parse_fail_count += 1
                    logging.info(f"{CYAN}{log_message_prefix} Failed to parse answer {log_message_suffix}{RESET}")
                elif final_parsed_answer == "SKIP":
                    initial_skipped_count += 1
                    logging.info(f"{YELLOW}{log_message_prefix} Skipped {log_message_suffix}{RESET}")
                else:
                    marks_awarded = result_data.get('marks_awarded', 0)
                    evaluation_status_value = result_data.get('evaluation_status')

                    is_considered_correct = False
                    log_display_status = "N/A"
                    status_check_string = ""

                    if evaluation_status_value is True:
                        is_considered_correct = True
                        log_display_status = "True (Boolean)"
                        status_check_string = "CORRECT_TRUE_BOOLEAN"
                    elif isinstance(evaluation_status_value, str):
                        log_display_status = evaluation_status_value
                        status_check_string = evaluation_status_value.strip().upper()
                        # Guard against statuses like "INCORRECT_*", which also contain "CORRECT".
                        if "CORRECT" in status_check_string and not status_check_string.startswith("INCORRECT"):
                            is_considered_correct = True
                    elif evaluation_status_value is None:
                        log_display_status = "None"
                        status_check_string = "NONE_STATUS"
                    else:
                        log_display_status = str(evaluation_status_value)
                        status_check_string = str(evaluation_status_value).strip().upper()

                    known_eval_skip_statuses = ["SKIPPED_BY_EVAL", "SKIPPED"]

                    if is_considered_correct:
                        initial_correct_count += 1
                        logging.info(f"{GREEN}{log_message_prefix} Correct (log) - Marks: {marks_awarded}, Status: {log_display_status} {log_message_suffix}{RESET}")
                    elif status_check_string in known_eval_skip_statuses:
                        initial_skipped_count += 1
                        logging.info(f"{YELLOW}{log_message_prefix} Skipped by Eval - Marks: {marks_awarded}, Status: {log_display_status} {log_message_suffix}{RESET}")
                    else:
                        initial_incorrect_count += 1
                        logging.info(f"{RED}{log_message_prefix} Incorrect (log) - Marks: {marks_awarded}, Status: {log_display_status} {log_message_suffix}{RESET}")

                pbar_initial.set_postfix_str(f"✓:{initial_correct_count} ✗:{initial_incorrect_count} S:{initial_skipped_count} P!:{initial_parse_fail_count} A!:{initial_api_fail_count}")

            except Exception as e:
                initial_api_fail_count += 1
                pbar_initial.set_postfix_str(f"✓:{initial_correct_count} ✗:{initial_incorrect_count} S:{initial_skipped_count} P!:{initial_parse_fail_count} A!:{initial_api_fail_count}")
                logging.error(f"Initial API call failed for question {question_id} (Attempt 1): {e}")
                result_data["error"] = str(e)
                result_data["api_call_successful"] = False
                failed_questions_data.append(example)

        pbar_initial.close()

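        # Retry pass: re-attempt questions whose initial API call raised an exception,
        # with the same re-prompt-on-parse-failure behaviour as the initial pass.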
        if failed_questions_data:
            logging.info(f"--- Retrying {len(failed_questions_data)} questions with initial API failures for model: {model_id} ---")

            retry_correct_count = 0
            retry_incorrect_count = 0
            retry_skipped_count = 0
            retry_parse_fail_count = 0
            retry_api_fail_count = 0

            pbar_retry = tqdm(failed_questions_data, desc=f"Processing {model_id} (API Retry Pass)", total=len(failed_questions_data))
            for example_retry in pbar_retry:
                question_id_retry = example_retry["question_id"]
                subject_retry = example_retry["subject"]
                exam_name_retry = example_retry.get("exam_name", "UNKNOWN_EXAM")
                question_type_retry = example_retry.get("question_type", "MCQ_SINGLE_CORRECT")
                image_retry: PILImage.Image = example_retry["image"]
                # Decode the ground truth the same way as in the initial pass.
                truth_retry = json.loads(example_retry["correct_answer"])

                result_data_retry = {
                    "question_id": question_id_retry,
                    "subject": subject_retry,
                    "exam_name": exam_name_retry,
                    "question_type": question_type_retry,
                    "ground_truth": truth_retry,
                    "predicted_answer": None,
                    "raw_response": None,
                    "parse_successful": False,
                    "api_call_successful": False,
                    "error": "Initial API call failed.",
                    "attempt": 2,
                    "previous_raw_response_on_reprompt_after_api_retry": None
                }

                try:
                    logging.info(f"Attempting API call for question: {question_id_retry} (API Retry Pass) with model: {model_id}")
                    parsed_answer_retry, raw_response_retry = get_openrouter_prediction(
                        model_identifier=model_id,
                        api_key=api_key,
                        image=image_retry,
                        exam_name=exam_name_retry,
                        exam_year=str(example_retry.get("exam_year", "UNKNOWN_YEAR")),
                        question_type=question_type_retry,
                        max_tokens=config.get("max_tokens", 100),
                        request_timeout=config.get("request_timeout", 60)
                    )
                    api_success_attempt2 = True
                    parse_success_attempt2 = parsed_answer_retry is not None
                    raw_response_attempt2 = raw_response_retry

                    if api_success_attempt2 and not parse_success_attempt2 and raw_response_attempt2 is not None:
                        logging.warning(f"Question {question_id_retry}: API Retry succeeded, but parse failed. Attempting re-prompt.")
                        result_data_retry["previous_raw_response_on_reprompt_after_api_retry"] = raw_response_attempt2
                        try:
                            parsed_answer_rp2, raw_response_rp2 = get_openrouter_prediction(
                                model_identifier=model_id,
                                api_key=api_key,
                                previous_raw_response=raw_response_attempt2,
                                question_type=question_type_retry,
                                max_tokens=config.get("max_tokens", 100),
                                request_timeout=config.get("request_timeout", 60)
                            )

                            if isinstance(parsed_answer_rp2, list):
                                processed_answer_rp2 = [str(item) for item in parsed_answer_rp2]
                            else:
                                processed_answer_rp2 = parsed_answer_rp2

                            result_data_retry.update({
                                "predicted_answer": processed_answer_rp2,
                                "raw_response": raw_response_rp2,
                                "parse_successful": processed_answer_rp2 is not None,
                                "api_call_successful": True,
                                "error": None if processed_answer_rp2 is not None else "Re-prompt after API retry failed to parse.",
                                "attempt": 3
                            })

                            logging.info(f"Question {question_id_retry}: API Retry + Re-prompt {'succeeded' if result_data_retry['parse_successful'] else 'failed to parse'}.")
                        except Exception as e_rp2:
                            logging.error(f"Re-prompt API call failed for question {question_id_retry} after API retry: {e_rp2}")
                            result_data_retry.update({
                                "error": f"API retry ok, parse failed. Re-prompt API call failed: {str(e_rp2)}",
                                "attempt": 2
                            })
                    else:
                        current_error_retry = result_data_retry.get("error")
                        if api_success_attempt2 and raw_response_attempt2 is None and parsed_answer_retry is None:
                            current_error_retry = "API retry call returned empty content. Re-prompt skipped."

                        if isinstance(parsed_answer_retry, list):
                            processed_retry_answer = [str(item) for item in parsed_answer_retry]
                        else:
                            processed_retry_answer = parsed_answer_retry

                        result_data_retry.update({
                            "predicted_answer": processed_retry_answer,
                            "raw_response": raw_response_attempt2,
                            "parse_successful": parse_success_attempt2,
                            "api_call_successful": api_success_attempt2,
                            "error": None if parse_success_attempt2 else current_error_retry,
                            "attempt": 2
                        })
                except Exception as e_retry_api:
                    logging.error(f"API call failed permanently for question {question_id_retry} (Attempt 2 API Retry): {e_retry_api}")
                    result_data_retry["error"] = f"Initial API fail. Retry API call also failed: {str(e_retry_api)}"
                    result_data_retry["api_call_successful"] = False

                # Score and persist the retried question just like in the initial pass.
                score_details_retry = calculate_single_question_score_details(result_data_retry)
                result_data_retry['marks_awarded'] = score_details_retry.get('marks_awarded')
                result_data_retry['evaluation_status'] = score_details_retry.get('evaluation_status')

                summary_detail_data_retry = {
                    "question_id": question_id_retry,
                    "marks_awarded": result_data_retry['marks_awarded'],
                    "evaluation_status": result_data_retry['evaluation_status'],
                    "predicted_answer": result_data_retry['predicted_answer'],
                    "ground_truth": result_data_retry['ground_truth'],
                    "attempt": result_data_retry['attempt']
                }
                append_summary_detail(summary_detail_data_retry, summary_details_path)

                model_results.append(result_data_retry)
                append_prediction(result_data_retry, predictions_path)

                log_message_prefix_retry = f"Question {question_id_retry} (Retry):"
                log_message_suffix_retry = f"(Attempt {result_data_retry['attempt']})"
                final_parsed_answer_retry = result_data_retry["predicted_answer"]

                if not result_data_retry["api_call_successful"]:
                    retry_api_fail_count += 1
                    logging.info(f"{MAGENTA}{log_message_prefix_retry} API Call Failed {log_message_suffix_retry}{RESET}")
                elif not result_data_retry["parse_successful"]:
                    retry_parse_fail_count += 1
                    logging.info(f"{CYAN}{log_message_prefix_retry} Failed to parse answer {log_message_suffix_retry}{RESET}")
                elif final_parsed_answer_retry == "SKIP":
                    retry_skipped_count += 1
                    logging.info(f"{YELLOW}{log_message_prefix_retry} Skipped {log_message_suffix_retry}{RESET}")
                else:
                    marks_awarded_retry = result_data_retry.get('marks_awarded', 0)
                    evaluation_status_value_retry = result_data_retry.get('evaluation_status')

                    is_considered_correct_retry = False
                    log_display_status_retry = "N/A"
                    status_check_string_retry = ""

                    if evaluation_status_value_retry is True:
                        is_considered_correct_retry = True
                        log_display_status_retry = "True (Boolean)"
                        status_check_string_retry = "CORRECT_TRUE_BOOLEAN"
                    elif isinstance(evaluation_status_value_retry, str):
                        log_display_status_retry = evaluation_status_value_retry
                        status_check_string_retry = evaluation_status_value_retry.strip().upper()
                        # Guard against statuses like "INCORRECT_*", which also contain "CORRECT".
                        if "CORRECT" in status_check_string_retry and not status_check_string_retry.startswith("INCORRECT"):
                            is_considered_correct_retry = True
                    elif evaluation_status_value_retry is None:
                        log_display_status_retry = "None"
                        status_check_string_retry = "NONE_STATUS"
                    else:
                        log_display_status_retry = str(evaluation_status_value_retry)
                        status_check_string_retry = str(evaluation_status_value_retry).strip().upper()

                    known_eval_skip_statuses_retry = ["SKIPPED_BY_EVAL", "SKIPPED"]

                    if is_considered_correct_retry:
                        retry_correct_count += 1
                        logging.info(f"{GREEN}{log_message_prefix_retry} Correct (log) - Marks: {marks_awarded_retry}, Status: {log_display_status_retry} {log_message_suffix_retry}{RESET}")
                    elif status_check_string_retry in known_eval_skip_statuses_retry:
                        retry_skipped_count += 1
                        logging.info(f"{YELLOW}{log_message_prefix_retry} Skipped by Eval - Marks: {marks_awarded_retry}, Status: {log_display_status_retry} {log_message_suffix_retry}{RESET}")
                    else:
                        retry_incorrect_count += 1
                        logging.info(f"{RED}{log_message_prefix_retry} Incorrect (log) - Marks: {marks_awarded_retry}, Status: {log_display_status_retry} {log_message_suffix_retry}{RESET}")

                pbar_retry.set_postfix_str(f"✓:{retry_correct_count} ✗:{retry_incorrect_count} S:{retry_skipped_count} P!:{retry_parse_fail_count} A!:{retry_api_fail_count}")
            pbar_retry.close()

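        # Aggregate all per-question results into exam-style scores and write the
        # run-level summary artifacts.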
        logging.info(f"--- Calculating final results for model: {model_id} ---")

        evaluation_summary = calculate_exam_scores(model_results)

        summary_exam_name_display = exam_name_choice if exam_name_choice and exam_name_choice.lower() != "all" else "All_Exams"
        summary_exam_year_display = exam_year_choice if exam_year_choice and exam_year_choice.lower() != "all" else "All_Years"

        summary = {
            "model_name": model_id,
            "exam_name": summary_exam_name_display,
            "exam_year": summary_exam_year_display,
            "question_ids_filter": question_ids_str if question_ids_str else "None",
            "timestamp": timestamp,
            "total_questions_in_dataset": original_dataset_size,
            "total_questions_processed_in_run": len(dataset),
            **evaluation_summary
        }
        logging.info(f"Overall Score: {summary.get('overall_score')}")
        logging.info(
            f"Full Correct: {summary.get('overall_correct_full')}, "
            f"Partial Correct: {summary.get('overall_partial_correct')}, "
            f"Incorrect Choice: {summary.get('overall_incorrect_choice')}, "
            f"Skipped: {summary.get('overall_skipped')}, "
            f"API/Parse Failures: {summary.get('overall_api_parse_failures')}"
        )

        logging.info(f"--- Results Summary for model: {model_id} ---")
        logging.info(json.dumps(summary, indent=2, sort_keys=True))
        logging.info("-------------------------------------")

        generate_markdown_summary(summary, markdown_summary_path)

    logging.info("Benchmark run completed.")

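# Example invocation (script name and model identifier are illustrative):
#   python run_benchmark.py --model some-provider/some-model --exam_name all --exam_year all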
if __name__ == "__main__":
    default_config_path = "configs/benchmark_config.yaml"
    default_metadata_path = "data/metadata.jsonl"

    # Discover models and exam filters up front so argparse can validate choices.
    available_models = get_available_models(default_config_path)
    available_exam_names, available_exam_years = get_available_exam_details(default_metadata_path)

    exam_name_choices = ["all"] + available_exam_names
    exam_year_choices = ["all"] + available_exam_years

    parser = argparse.ArgumentParser(description="Run JEE/NEET LLM Benchmark.")
    parser.add_argument(
        "--config",
        type=str,
        default=default_config_path,
        help=f"Path to the benchmark configuration YAML file (default: {default_config_path})."
    )
    parser.add_argument(
        "--model",
        type=str,
        required=bool(available_models),
        choices=available_models if available_models else None,
        help="Select the model to run." + (f" Available: {', '.join(available_models)}." if available_models else " (No models found in config)")
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        help="Override the base output directory specified in the config file."
    )
    parser.add_argument(
        "--exam_name",
        type=str,
        default="all",
        choices=exam_name_choices,
        help="Select the exam name to run, or 'all' for all exams." + (f" Available: {', '.join(available_exam_names)}." if available_exam_names else "")
    )
    parser.add_argument(
        "--exam_year",
        type=str,
        default="all",
        choices=exam_year_choices,
        help="Select the exam year to run, or 'all' for all years." + (f" Available: {', '.join(available_exam_years)}." if available_exam_years else "")
    )
    parser.add_argument(
        "--question_ids",
        type=str,
        default=None,
        help="Optional: Comma-separated list of specific question IDs to run (e.g., ID1,ID2,ID3)."
    )
    args = parser.parse_args()

    # If the user pointed at a non-default config, make sure the selected model
    # actually exists there before doing any work.
    if args.config != default_config_path:
        logging.info(f"User provided config path: {args.config}. Re-fetching models if necessary.")
        if not available_models or args.model not in available_models:
            user_config_models = get_available_models(args.config)
            if args.model not in user_config_models:
                logging.error(f"Selected model '{args.model}' not found in the specified config '{args.config}'. Exiting.")
                exit(1)

    try:
        api_key = load_api_key()
        config = load_config(args.config)

        # Final sanity check: the chosen model must be listed in the loaded config.
        if args.model not in config.get("openrouter_models", []):
            if args.model not in get_available_models(args.config):
                logging.error(f"The model '{args.model}' specified is not listed in the config file '{args.config}'. Please check the model name or the config file.")
                exit(1)

        run_benchmark(
            config=config,
            api_key=api_key,
            model_to_run=args.model,
            output_dir_override=args.output_dir,
            exam_name_choice=args.exam_name,
            exam_year_choice=args.exam_year,
            question_ids_str=args.question_ids
        )
    except (ValueError, FileNotFoundError, yaml.YAMLError) as e:
        logging.error(f"Setup failed: {e}")
    except Exception as e:
        logging.error(f"An unexpected error occurred during benchmark execution: {e}", exc_info=True)