import logging
import re
from typing import List, Optional, Union, Dict, Any

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def calculate_accuracy(predictions: List[Optional[List[str]]], ground_truths: List[List[str]]) -> float:
    """
    Calculates the accuracy of LLM predictions against ground truths.

    Accuracy is defined as the fraction of predictions where the predicted list
    of answer strings exactly matches the ground truth list of answer strings
    (order within the list does not matter; the comparison is case-insensitive).
    A prediction of None (due to parsing failure) is considered incorrect.

    Args:
        predictions (List[Optional[List[str]]]): A list where each element is either a list
            of predicted answer strings or None if parsing failed for that question.
        ground_truths (List[List[str]]): A list where each element is a list of
            ground truth answer strings.

    Returns:
        float: The calculated accuracy (between 0.0 and 1.0).

    Raises:
        ValueError: If the lengths of predictions and ground_truths lists do not match.
    """
    if len(predictions) != len(ground_truths):
        raise ValueError(f"Length mismatch: Predictions ({len(predictions)}) vs Ground Truths ({len(ground_truths)})")

    if not ground_truths:
        return 0.0

    correct_count = 0
    for i, pred_list_orig in enumerate(predictions):
        truth_list_orig = ground_truths[i]

        # Normalise to uppercase and sort so the comparison is case-insensitive and order-independent.
        sorted_pred = sorted([str(p).upper() for p in pred_list_orig]) if pred_list_orig is not None else None
        sorted_truth = sorted([str(t).upper() for t in truth_list_orig])

        if sorted_pred is not None and sorted_pred == sorted_truth:
            correct_count += 1
        else:
            # Only log mismatch details when there was an actual (non-None) prediction.
            if sorted_pred is not None:
                logging.debug(f"Incorrect prediction for index {i}: Pred={sorted_pred}, Truth={sorted_truth} (Original Pred: {pred_list_orig}, Original Truth: {truth_list_orig})")

    accuracy = correct_count / len(ground_truths)
    logging.info(f"Accuracy calculated: {correct_count}/{len(ground_truths)} = {accuracy:.4f}")
    return accuracy
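
# Example (illustrative): order within each answer list does not matter and matching is
# case-insensitive, so calculate_accuracy([["b", "a"], None], [["A", "B"], ["C"]]) returns 0.5
# (the None prediction counts as incorrect).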
def get_subject_as_section(subject: str, question_num_for_log: int) -> Optional[str]:
    """
    Returns the subject name directly as the section identifier.
    question_num_for_log is only used for logging context if subject is invalid.
    """
    if subject and isinstance(subject, str) and subject.strip():
        return subject.strip()
    else:
        logging.warning(f"Invalid or missing subject ('{subject}') for question_num '{question_num_for_log}'. Cannot determine section.")
        return None


def is_within_range(predicted_value_str: str, lower_bound_str: str, upper_bound_str: str) -> bool:
    """
    Checks if a predicted numerical value (as a string) falls within a specified range.
    The comparison is inclusive.
    """
    try:
        predicted_value = float(predicted_value_str)
        lower_bound = float(lower_bound_str)
        upper_bound = float(upper_bound_str)
    except ValueError:
        logging.debug(f"Could not convert predicted value '{predicted_value_str}' or bounds ('{lower_bound_str}', '{upper_bound_str}') to numbers.")
        return False

    return lower_bound <= predicted_value <= upper_bound
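
# Example (illustrative): both bounds are inclusive and non-numeric input is treated as out of
# range, e.g. is_within_range("5", "5", "10") -> True, is_within_range("x", "0", "1") -> False.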
def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict[str, Any]:
    """
    Calculates marks_awarded and evaluation_status for a single question result.

    Args:
        result_item (Dict[str, Any]): A dictionary for a single question, must contain:
            'question_id' (str)
            'exam_name' (str)
            'question_type' (str)
            'ground_truth' (List[str] | str): answer option(s)/value(s); for JEE Advanced
                INTEGER questions an entry may also be a [lower_bound, upper_bound] pair of strings.
            'predicted_answer' (List[str] | str | None)
            'api_call_successful' (bool)

    Returns:
        Dict[str, Any]: A dictionary with 'marks_awarded' (int) and 'evaluation_status' (str).
    """
    question_id = result_item.get("question_id", "UNKNOWN_QID")
    exam_name = result_item.get("exam_name", "").upper()
    question_type = result_item.get("question_type", "").upper()
    pred = result_item.get("predicted_answer")
    truth = result_item.get("ground_truth")
    api_success = result_item.get("api_call_successful", False)

    current_score_change = 0
    evaluation_status = "unknown"

    # Normalise the ground truth to uppercase. Plain strings are answer options/values;
    # two-element string lists are [lower_bound, upper_bound] ranges (JEE Advanced INTEGER).
    truth_processed: List[Union[str, List[str]]] = []
    if isinstance(truth, list):
        for t_item in truth:
            if isinstance(t_item, str):
                truth_processed.append(t_item.upper())
            elif isinstance(t_item, list) and len(t_item) == 2 and all(isinstance(x, str) for x in t_item):
                truth_processed.append([x.upper() for x in t_item])
            else:
                logging.error(f"Invalid item in ground_truth list for {question_id}: {t_item}. Skipping.")
    elif isinstance(truth, str):
        truth_processed.append(truth.upper())
    else:
        logging.error(f"Invalid ground_truth format for {question_id}: {truth} (type: {type(truth)}). Assigning 0 marks.")
        return {"marks_awarded": 0, "evaluation_status": "error_bad_ground_truth"}

    if not api_success or pred is None:
        # API or parsing failures are penalised like a wrong choice, except on INTEGER
        # questions, which carry no negative marking.
        evaluation_status = "failure_api_or_parse"
        current_score_change = -1
        if question_type == "INTEGER" and exam_name in ("JEE_MAIN", "JEE_ADVANCED"):
            current_score_change = 0
    elif isinstance(pred, str) and pred.upper() == "SKIP":
        current_score_change = 0
        evaluation_status = "skipped"
    elif isinstance(pred, list) and all(isinstance(p, str) for p in pred):
        pred_set = {p.upper() for p in pred}

        if question_type == "MCQ_SINGLE_CORRECT":
            # Exactly one option must be chosen and it must appear in the ground truth.
            is_correct = False
            if len(pred_set) == 1:
                single_pred_answer = list(pred_set)[0]
                if single_pred_answer in truth_processed:
                    is_correct = True

            if is_correct:
                evaluation_status = "correct"
                if exam_name == "NEET": current_score_change = 4
                elif exam_name == "JEE_MAIN": current_score_change = 4
                elif exam_name == "JEE_ADVANCED": current_score_change = 3
                else: current_score_change = 1
            else:
                evaluation_status = "incorrect"
                if exam_name == "NEET": current_score_change = -1
                elif exam_name == "JEE_MAIN": current_score_change = -1
                elif exam_name == "JEE_ADVANCED": current_score_change = -1
                else: current_score_change = 0

        elif exam_name == "JEE_MAIN" and question_type == "INTEGER":
            is_correct = False
            if len(pred_set) == 1:
                predicted_answer_str = list(pred_set)[0]
                if predicted_answer_str in truth_processed:
                    is_correct = True

            if is_correct:
                current_score_change = 4; evaluation_status = "correct"
            else:
                current_score_change = 0; evaluation_status = "incorrect"

        elif exam_name == "JEE_ADVANCED":

            if question_type == "INTEGER":
                is_correct = False
                if len(pred_set) == 1:
                    predicted_answer_str = list(pred_set)[0]
                    # The ground truth may list exact values and/or [lower, upper] ranges.
                    for gt_entry in truth_processed:
                        if isinstance(gt_entry, list) and len(gt_entry) == 2:
                            lower_bound_str, upper_bound_str = gt_entry[0], gt_entry[1]
                            if is_within_range(predicted_answer_str, lower_bound_str, upper_bound_str):
                                is_correct = True
                                break
                        elif isinstance(gt_entry, str):
                            if predicted_answer_str == gt_entry:
                                is_correct = True
                                break

                if is_correct:
                    current_score_change = 4; evaluation_status = "correct"
                else:
                    current_score_change = 0; evaluation_status = "incorrect"

            elif question_type == "MCQ_MULTIPLE_CORRECT":
                # Partial-marking scheme:
                #   any wrong option chosen           -> -2
                #   all correct options chosen        -> +4
                #   3 of 4 correct, nothing wrong     -> +3
                #   2 of >=3 correct, nothing wrong   -> +2
                #   1 of >=2 correct, nothing wrong   -> +1
                #   otherwise                         ->  0
                truth_set_mcq = set(truth_processed)

                num_correct_options_in_truth = len(truth_set_mcq)
                num_chosen_options = len(pred_set)
                correct_chosen_options = pred_set.intersection(truth_set_mcq)
                incorrect_chosen_options = pred_set.difference(truth_set_mcq)
                num_correct_chosen = len(correct_chosen_options)
                num_incorrect_chosen = len(incorrect_chosen_options)

                if num_incorrect_chosen > 0:
                    current_score_change = -2; evaluation_status = "incorrect_negative"
                elif num_correct_chosen == num_correct_options_in_truth and num_chosen_options == num_correct_options_in_truth:
                    current_score_change = 4; evaluation_status = "correct_full"
                elif num_correct_options_in_truth == 4 and num_correct_chosen == 3 and num_chosen_options == 3:
                    current_score_change = 3; evaluation_status = "partial_3_of_4"
                elif num_correct_options_in_truth >= 3 and num_correct_chosen == 2 and num_chosen_options == 2:
                    current_score_change = 2; evaluation_status = "partial_2_of_3_plus"
                elif num_correct_options_in_truth >= 2 and num_correct_chosen == 1 and num_chosen_options == 1:
                    current_score_change = 1; evaluation_status = "partial_1_of_2_plus"
                else:
                    current_score_change = 0; evaluation_status = "no_marks_no_penalty"
        else:
            logging.warning(f"Unknown exam_name/question_type combination for scoring: {exam_name}/{question_type} for QID {question_id}. Assigning 0 marks.")
            current_score_change = 0
            evaluation_status = "unknown_exam_type"
    else:
        logging.error(f"Unexpected prediction type for {question_id}: {pred}. Treating as API/Parse Failure.")
        current_score_change = -1
        evaluation_status = "failure_unexpected_type"

    return {"marks_awarded": current_score_change, "evaluation_status": evaluation_status}
def calculate_max_score_for_question(exam_name: str, question_type: str) -> int:
    """
    Returns the maximum possible score for a given exam and question type.
    Unknown exam/question-type combinations return 0.
    """
    exam_name = exam_name.upper()
    question_type = question_type.upper()

    if exam_name == "NEET" and question_type == "MCQ_SINGLE_CORRECT":
        return 4
    elif exam_name == "JEE_MAIN":
        if question_type == "MCQ_SINGLE_CORRECT":
            return 4
        elif question_type == "INTEGER":
            return 4
    elif exam_name == "JEE_ADVANCED":
        if question_type == "MCQ_SINGLE_CORRECT":
            return 3
        elif question_type == "INTEGER":
            return 4
        elif question_type == "MCQ_MULTIPLE_CORRECT":
            return 4
    return 0
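
# Example (illustrative): calculate_max_score_for_question("JEE_ADVANCED", "MCQ_SINGLE_CORRECT")
# returns 3, while a combination not listed above (e.g. "NEET"/"INTEGER") falls through to 0.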
def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Calculates exam scores based on exam_name and question_type, providing a section-wise
    breakdown and detailed question-type statistics.

    Args:
        results (List[Dict[str, Any]]): A list of result dictionaries. Each dict must contain:
            'question_id' (str)
            'subject' (str)
            'exam_name' (str), e.g. "NEET", "JEE_MAIN", "JEE_ADVANCED"
            'question_type' (str), e.g. "MCQ_SINGLE_CORRECT", "MCQ_MULTIPLE_CORRECT", "INTEGER"
            'ground_truth' (List[str] | str): Correct answer(s). For INTEGER, a single str.
            'predicted_answer' (List[str] | str | None): Model's prediction.
            'api_call_successful' (bool): Whether the API call succeeded.
            This list is modified in place: 'evaluation_status' and 'marks_awarded' are added
            to each item by calling calculate_single_question_score_details.

    Returns:
        Dict[str, Any]: A dictionary containing overall and section-wise scores and counts,
        plus question-type breakdowns and the total possible score.
    """
    if not results:
        return {"error": "No results provided."}

    overall_stats = {
        "score": 0,
        "correct": 0,
        "incorrect": 0,
        "skipped": 0,
        "api_parse_failures": 0,
        "partial_correct": 0,
        "total_possible_score": 0
    }

    question_type_breakdown: Dict[str, Dict[str, Any]] = {}

    # Initialise per-section stats from the subjects present in the data.
    valid_subjects_from_data = [
        r.get("subject") for r in results
        if r.get("subject") and isinstance(r.get("subject"), str) and r.get("subject").strip()
    ]
    if not valid_subjects_from_data and results:
        logging.warning("No valid subjects found in results data to initialize section_stats.")

    unique_subjects = sorted(set(s.strip() for s in valid_subjects_from_data))
    section_stats = {
        subj: {"score": 0, "correct": 0, "incorrect": 0, "skipped": 0, "api_parse_failures": 0, "partial_correct": 0}
        for subj in unique_subjects
    }
    if not unique_subjects and results:
        logging.warning("section_stats is empty because no unique, valid subjects were found.")

    unmapped_section_questions = 0

    for result in results:
        question_id = result.get("question_id")
        subject = result.get("subject")
        exam_name = result.get("exam_name", "").upper()
        question_type = result.get("question_type", "").upper()

        # Score this question and record the outcome on the result dict itself.
        score_details = calculate_single_question_score_details(result)
        current_score_change = score_details.get("marks_awarded", 0)
        evaluation_status = score_details.get("evaluation_status", "unknown_error_in_scoring")

        result['evaluation_status'] = evaluation_status
        result['marks_awarded'] = current_score_change

        overall_stats["total_possible_score"] += calculate_max_score_for_question(exam_name, question_type)

        is_correct_full = evaluation_status in ["correct", "correct_full"]
        is_partial_correct = evaluation_status.startswith("partial_")
        is_incorrect_choice = evaluation_status in ["incorrect", "incorrect_negative"]
        is_skipped = evaluation_status == "skipped"
        is_api_parse_failure = evaluation_status in ["failure_api_or_parse", "failure_unexpected_type", "error_bad_ground_truth"]

        overall_stats["score"] += current_score_change
        if is_correct_full: overall_stats["correct"] += 1
        if is_incorrect_choice: overall_stats["incorrect"] += 1
        if is_skipped: overall_stats["skipped"] += 1
        if is_api_parse_failure: overall_stats["api_parse_failures"] += 1
        if is_partial_correct: overall_stats["partial_correct"] += 1

        if question_type not in question_type_breakdown:
            question_type_breakdown[question_type] = {
                "count": 0,
                "score": 0,
                "correct_full": 0,
                "partial_correct": 0,
                "incorrect_choice": 0,
                "skipped": 0,
                "api_parse_failures": 0,
                "max_score_per_question": calculate_max_score_for_question(exam_name, question_type)
            }

        q_type_stats = question_type_breakdown[question_type]
        q_type_stats["count"] += 1
        q_type_stats["score"] += current_score_change
        if is_correct_full: q_type_stats["correct_full"] += 1
        if is_incorrect_choice: q_type_stats["incorrect_choice"] += 1
        if is_skipped: q_type_stats["skipped"] += 1
        if is_api_parse_failure: q_type_stats["api_parse_failures"] += 1
        if is_partial_correct: q_type_stats["partial_correct"] += 1

        # Map the question to a section (its subject); a trailing question number is extracted
        # from the question_id purely for logging context.
        section = None
        if subject:
            question_num_for_log = -1
            if question_id:
                match_num = re.search(r'_(\d+)$', question_id)
                if match_num:
                    try: question_num_for_log = int(match_num.group(1))
                    except ValueError: pass
            section = get_subject_as_section(subject, question_num_for_log)

        if section and section in section_stats:
            section_stats[section]["score"] += current_score_change
            if is_correct_full: section_stats[section]["correct"] += 1
            if is_incorrect_choice: section_stats[section]["incorrect"] += 1
            if is_skipped: section_stats[section]["skipped"] += 1
            if is_api_parse_failure: section_stats[section]["api_parse_failures"] += 1
            if is_partial_correct: section_stats[section]["partial_correct"] += 1
        elif section is None and not is_api_parse_failure:
            unmapped_section_questions += 1

    logging.info(f"Exam Score Calculation Complete. Overall Score: {overall_stats['score']}")
    if unmapped_section_questions > 0:
        logging.warning(f"{unmapped_section_questions} questions could not be mapped to a section.")

    return {
        "overall_score": overall_stats["score"],
        "overall_correct_full": overall_stats["correct"],
        "overall_partial_correct": overall_stats["partial_correct"],
        "overall_incorrect_choice": overall_stats["incorrect"],
        "overall_skipped": overall_stats["skipped"],
        "overall_api_parse_failures": overall_stats["api_parse_failures"],
        "total_questions_processed": len(results),
        "total_possible_score_for_processed_questions": overall_stats["total_possible_score"],
        "unmapped_section_questions": unmapped_section_questions,
        "section_breakdown": section_stats,
        "question_type_breakdown": question_type_breakdown
    }


if __name__ == '__main__':
    print("Running evaluation tests...")

    print("\n--- Testing calculate_accuracy ---")
    preds1_str = [["1"], ["2"], ["1", "3"]]
    truths1_str = [["1"], ["2"], ["3", "1"]]
    acc1_str = calculate_accuracy(preds1_str, truths1_str)
    print(f"Test Case 1 (Accuracy - String): Preds={preds1_str}, Truths={truths1_str} -> Accuracy: {acc1_str} (Expected: 1.0)")
    assert acc1_str == 1.0

    preds2_str = [["A"], ["B"], ["A", "C"]]
    truths2_str = [["a"], ["b"], ["c", "a"]]
    acc2_str = calculate_accuracy(preds2_str, truths2_str)
    print(f"Test Case 2 (Accuracy - String Case-Insensitive): Preds={preds2_str}, Truths={truths2_str} -> Accuracy: {acc2_str} (Expected: 1.0)")
    assert acc2_str == 1.0

    preds3_str = [["10"], None, ["5"]]
    truths3_str = [["10"], ["7"], ["5"]]
    acc3_str = calculate_accuracy(preds3_str, truths3_str)
    print(f"Test Case 3 (Accuracy - String with None): Preds={preds3_str}, Truths={truths3_str} -> Accuracy: {acc3_str} (Expected: {2/3})")
    assert acc3_str == (2/3)
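
    # Illustrative check of the documented error path: mismatched list lengths should raise
    # ValueError rather than silently scoring whatever lines up.
    try:
        calculate_accuracy([["A"]], [["A"], ["B"]])
        raise AssertionError("Expected ValueError for mismatched prediction/ground-truth lengths")
    except ValueError as exc:
        print(f"Test Case 4 (Accuracy - Length Mismatch): raised ValueError as expected ({exc})")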
print("\n--- Testing calculate_exam_scores ---") |
|
test_results_exam = [ |
|
|
|
{"question_id": "N001", "subject": "Physics", "exam_name": "NEET", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["1"], "predicted_answer": ["1"], "api_call_successful": True}, |
|
{"question_id": "N002", "subject": "Physics", "exam_name": "NEET", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["D"], "predicted_answer": ["B"], "api_call_successful": True}, |
|
{"question_id": "N003", "subject": "Chemistry", "exam_name": "NEET", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["4"], "predicted_answer": "SKIP", "api_call_successful": True}, |
|
{"question_id": "N004", "subject": "Chemistry", "exam_name": "NEET", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["C"], "predicted_answer": None, "api_call_successful": False}, |
|
{"question_id": "N005", "subject": "Botany", "exam_name": "NEET", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["A"], "predicted_answer": None, "api_call_successful": True}, |
|
|
|
|
|
{"question_id": "JM001", "subject": "Maths", "exam_name": "JEE_MAIN", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["2"], "predicted_answer": ["2"], "api_call_successful": True}, |
|
{"question_id": "JM002", "subject": "Maths", "exam_name": "JEE_MAIN", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["C"], "predicted_answer": ["a"], "api_call_successful": True}, |
|
|
|
{"question_id": "JM003", "subject": "Physics", "exam_name": "JEE_MAIN", "question_type": "INTEGER", "ground_truth": ["5"], "predicted_answer": ["5"], "api_call_successful": True}, |
|
{"question_id": "JM004", "subject": "Physics", "exam_name": "JEE_MAIN", "question_type": "INTEGER", "ground_truth": ["10"], "predicted_answer": ["8"], "api_call_successful": True}, |
|
{"question_id": "JM005", "subject": "Chemistry", "exam_name": "JEE_MAIN", "question_type": "INTEGER", "ground_truth": ["7"], "predicted_answer": None, "api_call_successful": True}, |
|
|
|
|
|
{"question_id": "JA001", "subject": "Maths", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["A"], "predicted_answer": ["a"], "api_call_successful": True}, |
|
{"question_id": "JA002", "subject": "Maths", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_SINGLE_CORRECT", "ground_truth": ["B"], "predicted_answer": ["C"], "api_call_successful": True}, |
|
|
|
{"question_id": "JA003", "subject": "Physics", "exam_name": "JEE_ADVANCED", "question_type": "INTEGER", "ground_truth": ["12"], "predicted_answer": ["12"], "api_call_successful": True}, |
|
{"question_id": "JA004", "subject": "Physics", "exam_name": "JEE_ADVANCED", "question_type": "INTEGER", "ground_truth": ["0"], "predicted_answer": ["1"], "api_call_successful": True}, |
|
|
|
{"question_id": "JA005", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "C"], "predicted_answer": ["a", "c"], "api_call_successful": True}, |
|
{"question_id": "JA006", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "B", "C"], "predicted_answer": ["a", "b"], "api_call_successful": True}, |
|
{"question_id": "JA007", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "B", "C", "D"], "predicted_answer": ["a", "b", "c"], "api_call_successful": True}, |
|
{"question_id": "JA008", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "B"], "predicted_answer": ["a"], "api_call_successful": True}, |
|
{"question_id": "JA009", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "C"], "predicted_answer": ["a", "b"], "api_call_successful": True}, |
|
{"question_id": "JA010", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "C"], "predicted_answer": ["b", "d"], "api_call_successful": True}, |
|
{"question_id": "JA011", "subject": "Chemistry", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A", "C"], "predicted_answer": "SKIP", "api_call_successful": True}, |
|
{"question_id": "JA012", "subject": "Maths", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A"], "predicted_answer": ["a"], "api_call_successful": True}, |
|
{"question_id": "JA013", "subject": "Physics", "exam_name": "JEE_ADVANCED", "question_type": "MCQ_MULTIPLE_CORRECT", "ground_truth": ["A","B","C"], "predicted_answer": ["a","d"], "api_call_successful": True}, |
|
] |
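
    # Illustrative spot-checks of the lower-level helpers on their own; the expected values
    # follow directly from the marking rules implemented above.
    single_details = calculate_single_question_score_details(test_results_exam[0])  # N001: correct NEET MCQ
    print(f"Spot-check N001: {single_details} (Expected: 4 marks, 'correct')")
    assert single_details == {"marks_awarded": 4, "evaluation_status": "correct"}
    assert is_within_range("3.14", "3", "3.2") is True
    assert is_within_range("5", "5", "5") is True      # bounds are inclusive
    assert is_within_range("abc", "0", "1") is False   # non-numeric input is never in range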
    exam_summary = calculate_exam_scores(test_results_exam)
    print("\nExam Score Summary:")
    import json
    print(json.dumps(exam_summary, indent=2, sort_keys=True))

    # Expected totals, grouped as: NEET, JEE Main MCQ, JEE Main INTEGER,
    # JEE Advanced MCQ single, JEE Advanced INTEGER, JEE Advanced MCQ multiple.
    assert exam_summary["overall_score"] == (4-1+0-1-1) + (4-1) + (4+0+0) + (3-1) + (4+0) + (4+2+3+1-2-2+0+4-2)
    assert exam_summary["overall_correct_full"] == 7
    assert exam_summary["overall_partial_correct"] == 3
    assert exam_summary["overall_incorrect_choice"] == 8
    assert exam_summary["overall_skipped"] == 2
    assert exam_summary["overall_api_parse_failures"] == 3

    assert exam_summary["section_breakdown"]["Physics"]["score"] == (4-1) + (4+0) + (4+0) - 2
    assert exam_summary["section_breakdown"]["Chemistry"]["score"] == (0-1) + (0) + (4+2+3+1-2-2+0)
    assert exam_summary["section_breakdown"]["Botany"]["score"] == -1
    assert exam_summary["section_breakdown"]["Maths"]["score"] == (4-1) + (3-1) + 4
print("\nEvaluation tests completed.") |
|
|