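"""Benchmark runner for the JEE/NEET LLM benchmark.

Loads the image-based question dataset, queries a single OpenRouter model
(re-prompting once on parse failures and retrying questions whose initial API
call failed), scores the answers, and writes predictions.jsonl, a per-question
summary.jsonl and a summary.md report into a timestamped results directory.
"""
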
import argparse
import yaml
import os
import json
import logging
import datetime # Added for timestamp
from typing import Dict, Any, List, Set, Tuple # Added for type hinting
from datasets import load_dataset, Image as HFImage # Import Image feature type
from tqdm import tqdm
from PIL import Image as PILImage # Import PIL for type hinting

# ANSI escape codes for colors
GREEN = '\033[92m'
RED = '\033[91m'
RESET = '\033[0m'
YELLOW = '\033[93m' # For skipped
CYAN = '\033[96m' # For parse failures
MAGENTA = '\033[95m' # For API failures

# Import local modules
from utils import load_api_key
from llm_interface import get_openrouter_prediction
# Import evaluation functions
from evaluation import calculate_accuracy, calculate_exam_scores, calculate_single_question_score_details

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def get_available_models(config_path: str) -> List[str]:
    """Loads models from the benchmark configuration YAML file."""
    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
        models = config.get("openrouter_models", [])
        if not models:
            logging.warning(f"No models found in {config_path} under 'openrouter_models'.")
        return models
    except FileNotFoundError:
        logging.error(f"Configuration file not found at {config_path} for model retrieval.")
        return []
    except yaml.YAMLError as e:
        logging.error(f"Error parsing configuration file {config_path} for model retrieval: {e}")
        return []
    except Exception as e:
        logging.error(f"Unexpected error retrieving models from {config_path}: {e}")
        return []

def get_available_exam_details(metadata_path: str) -> Tuple[List[str], List[str]]:
    """Reads metadata.jsonl to get unique exam names and years."""
    exam_names: Set[str] = set()
    exam_years: Set[str] = set()
    try:
        with open(metadata_path, 'r') as f:
            for line in f:
                try:
                    data = json.loads(line)
                    if 'exam_name' in data:
                        exam_names.add(data['exam_name'])
                    if 'exam_year' in data:
                        exam_years.add(str(data['exam_year']))
                except json.JSONDecodeError:
                    logging.warning(f"Skipping malformed JSON line in {metadata_path}: {line.strip()}")
        
        sorted_exam_names = sorted(list(exam_names))
        sorted_exam_years = sorted(list(exam_years))
        
        if not sorted_exam_names:
            logging.warning(f"No exam names found in {metadata_path}.")
        if not sorted_exam_years:
            logging.warning(f"No exam years found in {metadata_path}.")
            
        return sorted_exam_names, sorted_exam_years
    except FileNotFoundError:
        logging.error(f"Metadata file not found at {metadata_path}.")
        return [], []
    except Exception as e:
        logging.error(f"Unexpected error reading or parsing {metadata_path}: {e}")
        return [], []

def load_config(config_path: str) -> dict:
    """Loads the benchmark configuration from a YAML file."""
    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
        logging.info(f"Configuration loaded from {config_path}")
        return config
    except FileNotFoundError:
        logging.error(f"Configuration file not found at {config_path}")
        raise
    except yaml.YAMLError as e:
        logging.error(f"Error parsing configuration file {config_path}: {e}")
        raise
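
# The benchmark config YAML is expected to provide at least the keys read in
# this script (illustrative sketch; the values below are defaults/examples,
# not the actual config):
#
#   openrouter_models:
#     - provider/model-name        # entries selectable via --model
#   results_base_dir: results      # base directory for run outputs
#   dataset_path: .                # path passed to datasets.load_dataset
#   max_tokens: 100                # completion limit per request
#   request_timeout: 60            # request timeout in seconds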

def append_prediction(result: Dict[str, Any], filepath: str):
    """Appends a single prediction result to a JSONL file."""
    # Create a copy to avoid modifying the original dict that might be used elsewhere
    # and remove evaluation-specific fields before saving to predictions.jsonl
    prediction_data = result.copy()
    prediction_data.pop('marks_awarded', None)
    prediction_data.pop('evaluation_status', None)
    prediction_data.pop('predicted_answer', None) # Remove predicted_answer
    prediction_data.pop('ground_truth', None) # Remove ground_truth
    try:
        with open(filepath, 'a') as f:
            json.dump(prediction_data, f)
            f.write('\n')
    except IOError as e:
        logging.error(f"Failed to append prediction to {filepath}: {e}")
    except Exception as e:
        logging.error(f"Unexpected error appending prediction to {filepath}: {e}")

def append_summary_detail(result_detail: Dict[str, Any], filepath: str):
    """Appends a single question's summary details (evaluation status, marks, predicted, truth) to a JSONL file."""
    try:
        with open(filepath, 'a') as f:
            json.dump(result_detail, f)
            f.write('\n')
    except IOError as e:
        logging.error(f"Failed to append summary detail to {filepath}: {e}")
    except Exception as e:
        logging.error(f"Unexpected error appending summary detail to {filepath}: {e}")


# Removed save_summary function as summary.json is no longer needed.

def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
    """Generates a human-readable Markdown summary from the results dictionary."""
    try:
        md_content = []
        model_name = summary.get("model_name", "N/A")
        exam_name = summary.get("exam_name", "N/A")
        exam_year = summary.get("exam_year", "N/A")
        timestamp = summary.get("timestamp", "N/A")
        total_questions_in_dataset = summary.get("total_questions_in_dataset", 0)
        total_questions_processed_in_run = summary.get("total_questions_processed_in_run", 0)

        filtered_questions_count = 0
        if total_questions_in_dataset > 0 and total_questions_processed_in_run > 0:
            filtered_questions_count = total_questions_in_dataset - total_questions_processed_in_run


        md_content.append(f"# Benchmark Results: {model_name}")
        if exam_name and exam_name not in ["N/A", "All_Exams"]: # Only display if a specific exam was targeted
            md_content.append(f"**Exam Name:** {exam_name}")
        if exam_year and exam_year not in ["N/A", "All_Years"]: # Only display if a specific year was targeted
            md_content.append(f"**Exam Year:** {exam_year}")
        md_content.append(f"**Timestamp:** {timestamp}")
        md_content.append(f"**Total Questions in Dataset:** {total_questions_in_dataset if total_questions_in_dataset > 0 else 'N/A'}")
        if filtered_questions_count > 0:
            md_content.append(f"**Questions Filtered Out:** {filtered_questions_count}")
        md_content.append(f"**Total Questions Processed in this Run:** {total_questions_processed_in_run}")
        # md_content.append(f"**Estimated Total API Cost:** ${total_api_cost:.6f}") # Removed
        md_content.append("\n---\n")

        # Check if NEET results are present (or any dataset with overall_score and section_breakdown)
        if "overall_score" in summary and "section_breakdown" in summary: # Generic check for score-based summary
            total_processed = summary.get("total_questions_processed", 0)
            
            overall_score = summary.get('overall_score', 'N/A')
            total_possible_score = summary.get('total_possible_score_for_processed_questions', 'N/A')
            correct_full_count = summary.get('overall_correct_full', 'N/A')
            partial_correct_count = summary.get('overall_partial_correct', 'N/A')
            incorrect_choice_count = summary.get('overall_incorrect_choice', 'N/A')
            skipped_count = summary.get('overall_skipped', 'N/A')
            failures_count = summary.get('overall_api_parse_failures', 'N/A')
            unmapped_count = summary.get('unmapped_section_questions', 'N/A')

            md_content.append("## Exam Scoring Results")
            md_content.append(f"**Overall Score:** **{overall_score}** / **{total_possible_score}**")
            md_content.append(f"- **Fully Correct Answers:** {correct_full_count}")
            if partial_correct_count != 'N/A' and partial_correct_count > 0:
                md_content.append(f"- **Partially Correct Answers:** {partial_correct_count}")
            md_content.append(f"- **Incorrectly Answered (Choice Made):** {incorrect_choice_count}")
            md_content.append(f"- **Skipped Questions:** {skipped_count}")
            md_content.append(f"- **API/Parse Failures:** {failures_count}")
            md_content.append(f"- **Total Questions Processed:** {total_processed}")
            if unmapped_count != 'N/A' and unmapped_count > 0:  # Guard against the 'N/A' default
                md_content.append(f"- **Unmapped Section Questions:** {unmapped_count} *(Not included in section breakdown)*")

            md_content.append("\n### Detailed Score Calculation by Question Type")
            question_type_breakdown = summary.get("question_type_breakdown", {})
            if question_type_breakdown:
                sorted_q_types = sorted(question_type_breakdown.keys())
                for q_type in sorted_q_types:
                    stats = question_type_breakdown[q_type]
                    q_type_display = q_type.replace('_', ' ').title()
                    max_score_per_q = stats.get('max_score_per_question', 0)
                    
                    correct_count_q = stats.get('correct_full', 0)
                    partial_count_q = stats.get('partial_correct', 0)
                    incorrect_count_q = stats.get('incorrect_choice', 0)
                    skipped_count_q = stats.get('skipped', 0)
                    api_fail_count_q = stats.get('api_parse_failures', 0)
                    score_q = stats.get('score', 0)

                    calculation_parts = []
                    if correct_count_q > 0:
                        calculation_parts.append(f"{correct_count_q} Correct (+{max_score_per_q})")
                    if partial_count_q > 0:
                        # For partial, we can't easily show the exact score per question without more detail
                        # For now, just indicate partials.
                        calculation_parts.append(f"{partial_count_q} Partial")
                    if incorrect_count_q > 0:
                        # Need to know penalty for incorrect. Assuming -1 for MCQ_SINGLE_CORRECT, -2 for MCQ_MULTIPLE_CORRECT
                        # For INTEGER, penalty is 0. This needs to be more robust if penalties vary.
                        penalty_per_incorrect = 0
                        if q_type == "MCQ_SINGLE_CORRECT": penalty_per_incorrect = -1
                        elif q_type == "MCQ_MULTIPLE_CORRECT": penalty_per_incorrect = -2
                        calculation_parts.append(f"{incorrect_count_q} Incorrect ({penalty_per_incorrect})")
                    if skipped_count_q > 0:
                        calculation_parts.append(f"{skipped_count_q} Skipped (0)")
                    if api_fail_count_q > 0:
                        # Assuming -1 for API/Parse failures for non-integer types, 0 for integer
                        penalty_per_api_fail = -1
                        if q_type == "INTEGER": penalty_per_api_fail = 0
                        calculation_parts.append(f"{api_fail_count_q} API/Parse Fail ({penalty_per_api_fail})")
                    
                    calculation_str = " + ".join(part for part in calculation_parts if part)
                    if not calculation_str:
                        calculation_str = "No questions of this type processed or all had 0 score change."

                    md_content.append(f"**{q_type_display} ({stats.get('count', 0)} questions):** {score_q} marks")
                    md_content.append(f"  *Calculation:* {calculation_str} = {score_q}")
            else:
                md_content.append("No question type breakdown available.")

            md_content.append("\n### Section Breakdown")
            md_content.append("| Section       | Score | Fully Correct | Partially Correct | Incorrect Choice | Skipped | API/Parse Failures |")
            md_content.append("|---------------|-------|---------------|-------------------|------------------|---------|--------------------|")
            section_breakdown = summary.get("section_breakdown", {})
            
            sorted_section_names = sorted(section_breakdown.keys())
            if not sorted_section_names and section_breakdown:
                logging.warning("Could not sort section names for Markdown summary; using unsorted.")
                sorted_section_names = list(section_breakdown.keys())

            for section_name in sorted_section_names:
                stats = section_breakdown.get(section_name, {})
                score = stats.get('score', 'N/A')
                s_correct = stats.get('correct', 'N/A')
                s_partial = stats.get('partial_correct', 'N/A')
                s_incorrect = stats.get('incorrect', 'N/A')
                s_skipped = stats.get('skipped', 'N/A')
                s_failures = stats.get('api_parse_failures', 'N/A')
                display_section_name = section_name.replace('_', ' ')
                md_content.append(f"| {display_section_name:<13} | {score:<5} | {s_correct:<13} | {s_partial:<17} | {s_incorrect:<16} | {s_skipped:<7} | {s_failures:<18} |")
            if not sorted_section_names:
                md_content.append("| No section data available | N/A   | N/A           | N/A               | N/A              | N/A     | N/A                |")
        
        # Fallback for simple accuracy (if exam scoring wasn't applicable or failed)
        elif "accuracy_on_parsed" in summary:
            md_content.append("## Simple Accuracy Results (Fallback)")
            md_content.append(f"- **Accuracy (on successfully parsed non-skipped):** {summary.get('accuracy_on_parsed', 'N/A'):.4f}")
            md_content.append(f"- **Total Processed Attempts:** {summary.get('total_processed_attempts', 'N/A')}")
            # Add other relevant simple stats if available
        else:
            md_content.append("## Summary")
            md_content.append("*(No specific Exam Scoring or Accuracy metrics found in summary)*")


        with open(filepath, 'w') as f:
            f.write("\n".join(md_content))
        logging.info(f"Markdown summary saved to {filepath}")

    except IOError as e:
        logging.error(f"Failed to save markdown summary to {filepath}: {e}")
    except Exception as e:
        logging.error(f"Unexpected error generating or saving markdown summary to {filepath}: {e}")


def run_benchmark(
    config: dict,
    api_key: str,
    model_to_run: str,  # Single OpenRouter model identifier to benchmark in this run
    output_dir_override: str | None = None,
    exam_name_choice: str | None = None,  # Specific exam name, or "all"/None for all exams
    exam_year_choice: str | None = None,  # Specific exam year, or "all"/None for all years
    question_ids_str: str | None = None  # Optional comma-separated question IDs to run
):
    """Runs the benchmark evaluation loop with incremental saving and retries."""

    # Determine models to run - now it's a single model
    models_to_run = [model_to_run] # Benchmark will run for the single specified model
    logging.info(f"Target model for this run: {model_to_run}")

    # Determine base output directory
    base_output_dir = output_dir_override if output_dir_override else config.get("results_base_dir", "results")
    os.makedirs(base_output_dir, exist_ok=True)

    # Load dataset
    dataset_path = config.get("dataset_path", ".") # Default to current dir if not specified
    try:
        # Load the dataset using the loading script from the specified path
        # Ensure the 'image' column is decoded
        # Explicitly specify data_files and data_dir for local loading.
        # data_dir should be the project root ('.') when loading a local script,
        # as the script is copied to a cache and needs to know where the actual data is.
        dataset = load_dataset(dataset_path, split='test', data_files={'test': 'data/metadata.jsonl'}, data_dir=os.getcwd(), trust_remote_code=True, download_mode="force_redownload")
        dataset = dataset.cast_column("image", HFImage(decode=True)) # Ensure images are loaded as PIL
        logging.info(f"Dataset loaded successfully from path: {dataset_path}. Original number of questions: {len(dataset)}")
    except Exception as e:
        logging.error(f"Failed to load dataset from path '{dataset_path}': {e}")
        logging.error("Ensure the path is correct and 'jee_neet_benchmark_dataset.py' exists.")
        return

    # Filter dataset based on choices
    original_dataset_size = len(dataset)
    
    # Filter by exam_name
    if exam_name_choice and exam_name_choice.lower() != "all":
        logging.info(f"Filtering dataset for exam_name: '{exam_name_choice}'")
        dataset = dataset.filter(lambda example: example.get('exam_name') == exam_name_choice)
        logging.info(f"Dataset size after exam_name filter: {len(dataset)} questions.")

    # Filter by exam_year
    if exam_year_choice and exam_year_choice.lower() != "all":
        try:
            filter_year_int = int(exam_year_choice)
            logging.info(f"Filtering dataset for exam_year: {filter_year_int}")
            dataset = dataset.filter(lambda example: example.get('exam_year') == filter_year_int)
            logging.info(f"Dataset size after exam_year filter: {len(dataset)} questions.")
        except ValueError:
            logging.error(f"Invalid exam_year provided: '{exam_year_choice}'. Must be an integer or 'all'. Year filtering skipped.")

    # Filter by specific question IDs if provided
    if question_ids_str:
        try:
            target_question_ids = {q_id.strip() for q_id in question_ids_str.split(',') if q_id.strip()}
            if target_question_ids:
                logging.info(f"Filtering dataset for specific question IDs: {target_question_ids}")
                dataset = dataset.filter(lambda example: example.get('question_id') in target_question_ids)
                logging.info(f"Dataset size after question_id filter: {len(dataset)} questions.")
            else:
                logging.warning("Empty or invalid question_ids string provided. No question ID filtering applied.")
        except Exception as e:
            logging.error(f"Error processing question_ids_str '{question_ids_str}': {e}. No question ID filtering applied.")
            
    if len(dataset) < original_dataset_size:
        logging.info(f"Final dataset size after all filters: {len(dataset)} (originally {original_dataset_size}).")
    
    if len(dataset) == 0:
        logging.warning("No questions to process after filtering. Skipping model benchmark.")
        return

    # --- Main Loop: Iterate through models ---
    for model_id in models_to_run:
        # total_questions here should refer to the length of the potentially filtered dataset
        current_total_questions = len(dataset) 
        logging.info(f"--- Starting benchmark for model: {model_id} (Processing {current_total_questions} questions) ---")

        # Create timestamped output directory for this model run
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        safe_model_name = model_id.replace('/', '_') # model_id is already the single model_to_run
        dir_name_parts = [safe_model_name]
        
        # Append exam name and year to directory if they are specific (not "all")
        current_exam_name_for_dir = exam_name_choice if exam_name_choice and exam_name_choice.lower() != "all" else "AllExams"
        current_exam_year_for_dir = exam_year_choice if exam_year_choice and exam_year_choice.lower() != "all" else "AllYears"

        if current_exam_name_for_dir != "AllExams":
            dir_name_parts.append(current_exam_name_for_dir.replace('/', '_'))
        if current_exam_year_for_dir != "AllYears":
            dir_name_parts.append(str(current_exam_year_for_dir)) # Already string or 'all'
        
        dir_name_parts.append(timestamp)
        
        model_output_dir_name = "_".join(filter(None, dir_name_parts)) # Filter out None if any part was None
        model_output_dir = os.path.join(base_output_dir, model_output_dir_name)
        os.makedirs(model_output_dir, exist_ok=True)
        predictions_path = os.path.join(model_output_dir, "predictions.jsonl")
        summary_details_path = os.path.join(model_output_dir, "summary.jsonl") # New file for per-question summary details
        markdown_summary_path = os.path.join(model_output_dir, "summary.md") # Define path for MD summary
        logging.info(f"Results for {model_id} will be saved to: {model_output_dir}")

        model_results = [] # Stores results in memory for final calculation
        failed_questions_data = [] # Stores data needed to retry failed questions

        # Counters for tqdm postfix
        initial_correct_count = 0
        initial_incorrect_count = 0
        initial_skipped_count = 0
        initial_parse_fail_count = 0
        initial_api_fail_count = 0

        # --- Initial Pass: Iterate through questions ---
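        # Each dataset example is expected to provide: question_id, subject,
        # exam_name, exam_year, question_type, a decoded PIL image, and
        # correct_answer as a JSON-encoded string (parsed below before scoring).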
        pbar_initial = tqdm(dataset, desc=f"Processing {model_id} (Initial Pass)", total=current_total_questions)
        for example in pbar_initial:
            question_id = example["question_id"]
            subject = example["subject"]
            exam_name_from_data = example.get("exam_name", "UNKNOWN_EXAM") # Get exam_name from data
            question_type_from_data = example.get("question_type", "MCQ_SINGLE_CORRECT") # Get question_type
            image: PILImage.Image = example["image"]
            truth = json.loads(example["correct_answer"]) # Parse the JSON string back to a list/list of lists

            result_data = {
                "question_id": question_id,
                "subject": subject,
                "exam_name": exam_name_from_data, # Store for evaluation
                "question_type": question_type_from_data, # Store for evaluation
                "ground_truth": truth,
                "predicted_answer": None,
                "raw_response": None,
                "parse_successful": False,
                "api_call_successful": False,
                "error": None,
                "attempt": 1,
                # "api_cost": None, # Removed
                "previous_raw_response_on_reprompt": None # For task 1
            }

            try:
                # --- Initial API Call ---
                logging.info(f"Attempting API call for question: {question_id} with model: {model_id}")
                # Pass exam_name_from_data and question_type_from_data to get_openrouter_prediction
                parsed_answer, raw_response = get_openrouter_prediction( # No longer expect api_cost
                    model_identifier=model_id,
                    api_key=api_key,
                    image=image,
                    exam_name=exam_name_from_data, # Use exam_name from current data item
                    exam_year=str(example.get("exam_year", "UNKNOWN_YEAR")), # Use exam_year from data
                    question_type=question_type_from_data, # Pass question_type
                    max_tokens=config.get("max_tokens", 100),
                    request_timeout=config.get("request_timeout", 60)
                )
                
                api_success_attempt1 = True # If no exception, API call itself was successful
                parse_success_attempt1 = parsed_answer is not None
                raw_response_attempt1 = raw_response
                # result_data["api_cost"] = api_cost # Removed

                # --- Re-prompt Logic ---
                if api_success_attempt1 and not parse_success_attempt1 and raw_response_attempt1 is not None:
                    logging.warning(f"Question {question_id}: Initial parse failed. Attempting re-prompt.")
                    result_data["previous_raw_response_on_reprompt"] = raw_response_attempt1 # Store previous response
                    try:
                        # Assuming re-prompt might also have a cost
                        parsed_answer_rp, raw_response_rp = get_openrouter_prediction( # No longer expect api_cost
                            model_identifier=model_id,
                            api_key=api_key,
                            previous_raw_response=raw_response_attempt1,
                            question_type=question_type_from_data, # Pass question_type for re-prompt
                            max_tokens=config.get("max_tokens", 100),
                            request_timeout=config.get("request_timeout", 60)
                        )
                        # Process parsed_answer_rp before assignment
                        if isinstance(parsed_answer_rp, list):
                            processed_answer_rp = [str(item) for item in parsed_answer_rp]
                        else:
                            processed_answer_rp = parsed_answer_rp

                        result_data.update({
                            "predicted_answer": processed_answer_rp,
                            "raw_response": raw_response_rp,
                            "parse_successful": processed_answer_rp is not None,
                            "api_call_successful": True,
                            "attempt": 2
                            # Assuming api_cost_rp would be added to existing api_cost or handled separately
                        })
                        # if api_cost_rp is not None: # Add re-prompt cost if available # Removed
                        #     result_data["api_cost"] = (result_data.get("api_cost") or 0.0) + api_cost_rp # Removed
                        logging.info(f"Question {question_id}: Re-prompt {'succeeded' if result_data['parse_successful'] else 'failed to parse'}.")
                    except Exception as e_rp:
                        logging.error(f"Re-prompt API call failed for question {question_id}: {e_rp}")
                        result_data.update({
                            "predicted_answer": None, 
                            "raw_response": raw_response_attempt1, 
                            "parse_successful": False,
                            "api_call_successful": True, 
                            "error": f"Initial parse failed. Re-prompt API call failed: {str(e_rp)}",
                            "attempt": 1 
                        })
                else:
                    current_error = result_data.get("error") 
                    api_actually_successful = api_success_attempt1
                    if api_success_attempt1 and raw_response_attempt1 is None and parsed_answer is None:
                        current_error = "Initial API call returned empty content. Re-prompt skipped."
                    
                    # Process parsed_answer before assignment
                    if isinstance(parsed_answer, list):
                        processed_initial_answer = [str(item) for item in parsed_answer]
                    else:
                        processed_initial_answer = parsed_answer

                    result_data.update({
                        "predicted_answer": processed_initial_answer,
                        "raw_response": raw_response_attempt1,
                        "parse_successful": parse_success_attempt1,
                        "api_call_successful": api_actually_successful,
                        "error": current_error,
                        "attempt": 1 
                    })
                
                # Calculate score details for the current result_data
                score_details = calculate_single_question_score_details(result_data)
                result_data['marks_awarded'] = score_details.get('marks_awarded')
                result_data['evaluation_status'] = score_details.get('evaluation_status')

                # Append evaluation details to summary.jsonl
                summary_detail_data = {
                    "question_id": question_id,
                    "marks_awarded": result_data['marks_awarded'],
                    "evaluation_status": result_data['evaluation_status'],
                    "predicted_answer": result_data['predicted_answer'], # Add predicted_answer
                    "ground_truth": result_data['ground_truth'], # Add ground_truth
                    "attempt": result_data['attempt']
                }
                append_summary_detail(summary_detail_data, summary_details_path)

                model_results.append(result_data)
                append_prediction(result_data, predictions_path) # append_prediction now handles removing evaluation fields

                final_parsed_answer = result_data["predicted_answer"]
                log_message_prefix = f"Question {question_id}:"
                log_message_suffix = f"(Attempt {result_data['attempt']})"

                if not result_data["api_call_successful"]:
                    initial_api_fail_count += 1
                    logging.info(f"{MAGENTA}{log_message_prefix} API Call Failed {log_message_suffix}{RESET}")
                elif not result_data["parse_successful"]:
                    initial_parse_fail_count += 1
                    logging.info(f"{CYAN}{log_message_prefix} Failed to parse answer {log_message_suffix}{RESET}")
                elif final_parsed_answer == "SKIP":
                    initial_skipped_count += 1
                    logging.info(f"{YELLOW}{log_message_prefix} Skipped {log_message_suffix}{RESET}")
                else: # This 'else' means API call was successful, parse was successful, and predicted_answer was not "SKIP" (by model)
                    marks_awarded = result_data.get('marks_awarded', 0) 
                    evaluation_status_value = result_data.get('evaluation_status')
                    
                    # Prepare evaluation_status for checks and logging
                    is_considered_correct = False
                    log_display_status = "N/A" # What to show in the log for status
                    status_check_string = "" # Uppercase string for "CORRECT" and "SKIPPED" checks

                    if evaluation_status_value is True:
                        is_considered_correct = True
                        log_display_status = "True (Boolean)"
                        status_check_string = "CORRECT_TRUE_BOOLEAN" # Contains "CORRECT"
                    elif isinstance(evaluation_status_value, str):
                        log_display_status = evaluation_status_value # Log original string
                        status_check_string = evaluation_status_value.strip().upper()
                        if "CORRECT" in status_check_string:
                            is_considered_correct = True
                    elif evaluation_status_value is None:
                        log_display_status = "None"
                        status_check_string = "NONE_STATUS"
                    else: # Other types
                        log_display_status = str(evaluation_status_value)
                        status_check_string = str(evaluation_status_value).strip().upper()

                    # Define known "skipped by evaluation" statuses (uppercase)
                    known_eval_skip_statuses = ["SKIPPED_BY_EVAL", "SKIPPED"]

                    if is_considered_correct:
                        initial_correct_count +=1
                        logging.info(f"{GREEN}{log_message_prefix} Correct (log) - Marks: {marks_awarded}, Status: {log_display_status} {log_message_suffix}{RESET}")
                    elif status_check_string in known_eval_skip_statuses:
                        initial_skipped_count += 1 
                        logging.info(f"{YELLOW}{log_message_prefix} Skipped by Eval - Marks: {marks_awarded}, Status: {log_display_status} {log_message_suffix}{RESET}")
                    else: # All other statuses are treated as incorrect
                        initial_incorrect_count += 1
                        logging.info(f"{RED}{log_message_prefix} Incorrect (log) - Marks: {marks_awarded}, Status: {log_display_status} {log_message_suffix}{RESET}")
                
                pbar_initial.set_postfix_str(f"βœ“:{initial_correct_count} βœ—:{initial_incorrect_count} S:{initial_skipped_count} P!:{initial_parse_fail_count} A!:{initial_api_fail_count}")

            except Exception as e:
                initial_api_fail_count +=1 # Assume API failure if exception at this level
                pbar_initial.set_postfix_str(f"βœ“:{initial_correct_count} βœ—:{initial_incorrect_count} S:{initial_skipped_count} P!:{initial_parse_fail_count} A!:{initial_api_fail_count}")
                logging.error(f"Initial API call failed for question {question_id} (Attempt 1): {e}")
                result_data["error"] = str(e)
                result_data["api_call_successful"] = False
                failed_questions_data.append(example) # Store original example for retry pass

        pbar_initial.close() # Ensure tqdm finishes cleanly

        # --- Retry Pass for questions with initial API failures ---
        if failed_questions_data:
            logging.info(f"--- Retrying {len(failed_questions_data)} questions with initial API failures for model: {model_id} ---")
            
            retry_correct_count = 0
            retry_incorrect_count = 0
            retry_skipped_count = 0
            retry_parse_fail_count = 0
            retry_api_fail_count = 0
            
            pbar_retry = tqdm(failed_questions_data, desc=f"Processing {model_id} (API Retry Pass)", total=len(failed_questions_data))
            for example_retry in pbar_retry:
                question_id_retry = example_retry["question_id"]
                subject_retry = example_retry["subject"]
                exam_name_retry = example_retry.get("exam_name", "UNKNOWN_EXAM")
                question_type_retry = example_retry.get("question_type", "MCQ_SINGLE_CORRECT")
                image_retry: PILImage.Image = example_retry["image"]
                truth_retry = json.loads(example_retry["correct_answer"]) # Parse the JSON string, matching the initial pass

                result_data_retry = {
                    "question_id": question_id_retry,
                    "subject": subject_retry,
                    "exam_name": exam_name_retry,
                    "question_type": question_type_retry,
                    "ground_truth": truth_retry,
                    "predicted_answer": None,
                    "raw_response": None,
                    "parse_successful": False,
                    "api_call_successful": False,
                    "error": "Initial API call failed.", # Pre-fill error
                    "attempt": 2,
                    # "api_cost": None, # Removed
                    "previous_raw_response_on_reprompt_after_api_retry": None # For task 1
                }

                try:
                    logging.info(f"Attempting API call for question: {question_id_retry} (API Retry Pass) with model: {model_id}")
                    parsed_answer_retry, raw_response_retry = get_openrouter_prediction( # No longer expect api_cost
                        model_identifier=model_id,
                        api_key=api_key,
                        image=image_retry,
                        exam_name=exam_name_retry,
                        exam_year=str(example_retry.get("exam_year", "UNKNOWN_YEAR")),
                        question_type=question_type_retry,
                        max_tokens=config.get("max_tokens", 100),
                        request_timeout=config.get("request_timeout", 60)
                    )
                    api_success_attempt2 = True
                    parse_success_attempt2 = parsed_answer_retry is not None
                    raw_response_attempt2 = raw_response_retry
                    # result_data_retry["api_cost"] = api_cost_retry # Removed

                    if api_success_attempt2 and not parse_success_attempt2 and raw_response_attempt2 is not None:
                        logging.warning(f"Question {question_id_retry}: API Retry succeeded, but parse failed. Attempting re-prompt.")
                        result_data_retry["previous_raw_response_on_reprompt_after_api_retry"] = raw_response_attempt2 # Store previous response
                        try:
                            # Assuming re-prompt might also have a cost
                            parsed_answer_rp2, raw_response_rp2 = get_openrouter_prediction( # No longer expect api_cost
                                model_identifier=model_id,
                                api_key=api_key,
                                previous_raw_response=raw_response_attempt2,
                                question_type=question_type_retry,
                                max_tokens=config.get("max_tokens", 100),
                                request_timeout=config.get("request_timeout", 60)
                            )
                            # Process parsed_answer_rp2 before assignment
                            if isinstance(parsed_answer_rp2, list):
                                processed_answer_rp2 = [str(item) for item in parsed_answer_rp2]
                            else:
                                processed_answer_rp2 = parsed_answer_rp2

                            result_data_retry.update({
                                "predicted_answer": processed_answer_rp2, "raw_response": raw_response_rp2,
                                "parse_successful": processed_answer_rp2 is not None, "api_call_successful": True,
                                "error": None if processed_answer_rp2 is not None else "Re-prompt after API retry failed to parse.",
                                "attempt": 3
                            })
                            # if api_cost_rp2 is not None: # Add re-prompt cost if available # Removed
                            #     result_data_retry["api_cost"] = (result_data_retry.get("api_cost") or 0.0) + api_cost_rp2 # Removed
                            logging.info(f"Question {question_id_retry}: API Retry + Re-prompt {'succeeded' if result_data_retry['parse_successful'] else 'failed to parse'}.")
                        except Exception as e_rp2:
                            logging.error(f"Re-prompt API call failed for question {question_id_retry} after API retry: {e_rp2}")
                            result_data_retry.update({
                                "error": f"API retry ok, parse failed. Re-prompt API call failed: {str(e_rp2)}",
                                "attempt": 2 # Stay at attempt 2 as re-prompt itself failed
                            })
                    else:
                        current_error_retry = result_data_retry.get("error")
                        if api_success_attempt2 and raw_response_attempt2 is None and parsed_answer_retry is None:
                            current_error_retry = "API retry call returned empty content. Re-prompt skipped."
                        
                        # Process parsed_answer_retry before assignment
                        if isinstance(parsed_answer_retry, list):
                            processed_retry_answer = [str(item) for item in parsed_answer_retry]
                        else:
                            processed_retry_answer = parsed_answer_retry

                        result_data_retry.update({
                            "predicted_answer": processed_retry_answer, "raw_response": raw_response_attempt2,
                            "parse_successful": parse_success_attempt2, "api_call_successful": api_success_attempt2,
                            "error": None if parse_success_attempt2 else current_error_retry, # Clear initial error if parse now ok
                            "attempt": 2
                        })
                except Exception as e_retry_api:
                    logging.error(f"API call failed permanently for question {question_id_retry} (Attempt 2 API Retry): {e_retry_api}")
                    result_data_retry["error"] = f"Initial API fail. Retry API call also failed: {str(e_retry_api)}"
                    result_data_retry["api_call_successful"] = False
                
                # Calculate score details for the current result_data_retry
                score_details_retry = calculate_single_question_score_details(result_data_retry)
                result_data_retry['marks_awarded'] = score_details_retry.get('marks_awarded')
                result_data_retry['evaluation_status'] = score_details_retry.get('evaluation_status')

                # Append evaluation details to summary.jsonl for retry pass
                summary_detail_data_retry = {
                    "question_id": question_id_retry,
                    "marks_awarded": result_data_retry['marks_awarded'],
                    "evaluation_status": result_data_retry['evaluation_status'],
                    "predicted_answer": result_data_retry['predicted_answer'], # Add predicted_answer
                    "ground_truth": result_data_retry['ground_truth'], # Add ground_truth
                    "attempt": result_data_retry['attempt']
                }
                append_summary_detail(summary_detail_data_retry, summary_details_path)

                model_results.append(result_data_retry)
                append_prediction(result_data_retry, predictions_path) # append_prediction now handles removing evaluation fields

                # Logging for retry pass
                log_message_prefix_retry = f"Question {question_id_retry} (Retry):"
                log_message_suffix_retry = f"(Attempt {result_data_retry['attempt']})"
                final_parsed_answer_retry = result_data_retry["predicted_answer"]

                if not result_data_retry["api_call_successful"]:
                    retry_api_fail_count += 1
                    logging.info(f"{MAGENTA}{log_message_prefix_retry} API Call Failed {log_message_suffix_retry}{RESET}")
                elif not result_data_retry["parse_successful"]:
                    retry_parse_fail_count += 1
                    logging.info(f"{CYAN}{log_message_prefix_retry} Failed to parse answer {log_message_suffix_retry}{RESET}")
                elif final_parsed_answer_retry == "SKIP":
                    retry_skipped_count += 1
                    logging.info(f"{YELLOW}{log_message_prefix_retry} Skipped {log_message_suffix_retry}{RESET}")
                else: # This 'else' means API call was successful, parse was successful, and predicted_answer was not "SKIP" (by model)
                    marks_awarded_retry = result_data_retry.get('marks_awarded', 0)
                    evaluation_status_value_retry = result_data_retry.get('evaluation_status')

                    is_considered_correct_retry = False
                    log_display_status_retry = "N/A"
                    status_check_string_retry = ""

                    if evaluation_status_value_retry is True:
                        is_considered_correct_retry = True
                        log_display_status_retry = "True (Boolean)"
                        status_check_string_retry = "CORRECT_TRUE_BOOLEAN"
                    elif isinstance(evaluation_status_value_retry, str):
                        log_display_status_retry = evaluation_status_value_retry
                        status_check_string_retry = evaluation_status_value_retry.strip().upper()
                        if "CORRECT" in status_check_string_retry:
                            is_considered_correct_retry = True
                    elif evaluation_status_value_retry is None:
                        log_display_status_retry = "None"
                        status_check_string_retry = "NONE_STATUS"
                    else: # Other types
                        log_display_status_retry = str(evaluation_status_value_retry)
                        status_check_string_retry = str(evaluation_status_value_retry).strip().upper()

                    known_eval_skip_statuses_retry = ["SKIPPED_BY_EVAL", "SKIPPED"]
                        
                    if is_considered_correct_retry:
                        retry_correct_count +=1
                        logging.info(f"{GREEN}{log_message_prefix_retry} Correct (log) - Marks: {marks_awarded_retry}, Status: {log_display_status_retry} {log_message_suffix_retry}{RESET}")
                    elif status_check_string_retry in known_eval_skip_statuses_retry:
                        retry_skipped_count += 1
                        logging.info(f"{YELLOW}{log_message_prefix_retry} Skipped by Eval - Marks: {marks_awarded_retry}, Status: {log_display_status_retry} {log_message_suffix_retry}{RESET}")
                    else:
                        retry_incorrect_count += 1
                        logging.info(f"{RED}{log_message_prefix_retry} Incorrect (log) - Marks: {marks_awarded_retry}, Status: {log_display_status_retry} {log_message_suffix_retry}{RESET}")
                
                pbar_retry.set_postfix_str(f"βœ“:{retry_correct_count} βœ—:{retry_incorrect_count} S:{retry_skipped_count} P!:{retry_parse_fail_count} A!:{retry_api_fail_count}")
            pbar_retry.close() # Ensure tqdm finishes cleanly

        # --- Final Evaluation for the current model ---
        logging.info(f"--- Calculating final results for model: {model_id} ---")
        
        # Always use calculate_exam_scores now
        evaluation_summary = calculate_exam_scores(model_results) # model_results modified in-place
        
        # Use the actual choices for the summary, defaulting to "All" if not specified or "all"
        summary_exam_name_display = exam_name_choice if exam_name_choice and exam_name_choice.lower() != "all" else "All_Exams"
        summary_exam_year_display = exam_year_choice if exam_year_choice and exam_year_choice.lower() != "all" else "All_Years"

        summary = {
            "model_name": model_id, # This is model_to_run
            "exam_name": summary_exam_name_display,
            "exam_year": summary_exam_year_display,
            "question_ids_filter": question_ids_str if question_ids_str else "None", # Add question ID filter info
            "timestamp": timestamp,
            "total_questions_in_dataset": original_dataset_size, # Total before any filtering
            "total_questions_processed_in_run": len(dataset), # Total after filtering for this run
            **evaluation_summary 
        }
        logging.info(f"Overall Score: {summary.get('overall_score')}")
        logging.info(f"Full Correct: {summary.get('overall_correct_full')}, Partial Correct: {summary.get('overall_partial_correct')}, Incorrect Choice: {summary.get('overall_incorrect_choice')}, Skipped: {summary.get('overall_skipped')}, API/Parse Failures: {summary.get('overall_api_parse_failures')}")
        
        logging.info(f"--- Results Summary for model: {model_id} ---")
        logging.info(json.dumps(summary, indent=2, sort_keys=True))
        logging.info("-------------------------------------")

        # The model_results list was modified in-place by calculate_exam_scores
        # to include evaluation_status and marks_awarded.
        # predictions.jsonl is now written incrementally without evaluation details.
        # No need to overwrite predictions.jsonl here.


        # Save final summary (Markdown) for the current model
        # The summary.json file is no longer generated as per user request.
        generate_markdown_summary(summary, markdown_summary_path) # Call the new function

    logging.info("Benchmark run completed.")


if __name__ == "__main__":
    # Get available choices for arguments
    # Assuming benchmark_config.yaml is in a 'configs' directory relative to script or a fixed path
    # Assuming metadata.jsonl is in a 'data' directory relative to script or a fixed path
    default_config_path = "configs/benchmark_config.yaml"
    default_metadata_path = "data/metadata.jsonl"

    available_models = get_available_models(default_config_path)
    available_exam_names, available_exam_years = get_available_exam_details(default_metadata_path)

    # Add "all" option for exams and years
    exam_name_choices = ["all"] + available_exam_names
    exam_year_choices = ["all"] + available_exam_years

    parser = argparse.ArgumentParser(description="Run JEE/NEET LLM Benchmark.")
    parser.add_argument(
        "--config",
        type=str,
        default=default_config_path,
        help=f"Path to the benchmark configuration YAML file (default: {default_config_path})."
    )
    parser.add_argument(
        "--model", # Changed from --models
        type=str,
        required=True if available_models else False, # Required if models are available
        choices=available_models if available_models else None,
        help="Select the model to run." + (f" Available: {', '.join(available_models)}." if available_models else " (No models found in config)")
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        help="Override the base output directory specified in the config file."
    )
    parser.add_argument(
        "--exam_name",
        type=str,
        default="all",
        choices=exam_name_choices if exam_name_choices else ["all"],
        help="Select the exam name to run, or 'all' for all exams." + (f" Available: {', '.join(available_exam_names)}." if available_exam_names else "")
    )
    parser.add_argument(
        "--exam_year",
        type=str, 
        default="all",
        choices=exam_year_choices if exam_year_choices else ["all"],
        help="Select the exam year to run, or 'all' for all years." + (f" Available: {', '.join(available_exam_years)}." if available_exam_years else "")
    )
    parser.add_argument(
        "--question_ids",
        type=str,
        default=None,
        help="Optional: Comma-separated list of specific question IDs to run (e.g., ID1,ID2,ID3)."
    )
    args = parser.parse_args()

    # Dynamically update config path if user provides a different one
    if args.config != default_config_path:
        logging.info(f"User provided config path: {args.config}. Re-fetching models if necessary.")
        # If models were not found with default, or if user specified a different config, try to load models from it.
        if not available_models or args.model not in available_models: 
            user_config_models = get_available_models(args.config)
            if args.model not in user_config_models:
                logging.error(f"Selected model '{args.model}' not found in the specified config '{args.config}'. Exiting.")
                exit(1) # Or handle more gracefully
            # Potentially update choices if parser allowed any string due to no initial models
            # This is complex with argparse after parsing. For now, we rely on the initial check or error out.

    try:
        # Load API key first - fail fast if not set
        api_key = load_api_key()
        # Load configuration using the (potentially user-overridden) config path
        config = load_config(args.config)
        
        # Ensure the selected model is valid if it was dynamically loaded
        if args.model not in config.get("openrouter_models", []):
            # This check is important if args.model was accepted because available_models was initially empty
            if args.model not in get_available_models(args.config): # Double check with the final config
                logging.error(f"The model '{args.model}' specified is not listed in the config file '{args.config}'. Please check the model name or the config file.")
                exit(1)


        # Run the benchmark
        run_benchmark(
            config=config, 
            api_key=api_key, 
            model_to_run=args.model, 
            output_dir_override=args.output_dir,
            exam_name_choice=args.exam_name,
            exam_year_choice=args.exam_year,
            question_ids_str=args.question_ids
        )
    except (ValueError, FileNotFoundError, yaml.YAMLError) as e:
        logging.error(f"Setup failed: {e}")
    except Exception as e:
        logging.error(f"An unexpected error occurred during benchmark execution: {e}", exc_info=True)