import json
import os
import logging
import tarfile  # Not strictly required here: dl_manager handles .tar.gz extraction itself.
import datasets
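
# Example of loading this dataset from the Hub (illustrative sketch; assumes the
# `datasets` library is installed and that this script is the repository's loading script):
#
#     from datasets import load_dataset
#     ds = load_dataset("Reja1/jee-neet-benchmark", split="test")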
_CITATION = """\
@misc{jee-neet-benchmark,
title={JEE/NEET LLM Benchmark},
author={Md Rejaullah},
year={2025},
howpublished={\\url{https://huggingface.co/datasets/Reja1/jee-neet-benchmark}},
}
"""
_DESCRIPTION = """\
A benchmark dataset for evaluating Large Language Models (LLMs) on Joint Entrance Examination (JEE)
and National Eligibility cum Entrance Test (NEET) questions from India. Questions are provided as
images, and metadata includes exam details, subject, and correct answers.
"""
_HOMEPAGE = "https://huggingface.co/datasets/Reja1/jee-neet-benchmark"
_LICENSE = "MIT License"


class JeeNeetBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for JeeNeetBenchmark."""

    def __init__(self, images_dir="images", **kwargs):
        """BuilderConfig for JeeNeetBenchmark.

        Args:
            images_dir: Directory containing the image files, relative to the dataset root.
            **kwargs: Keyword arguments forwarded to the parent BuilderConfig.
        """
        self.images_dir = images_dir
        super(JeeNeetBenchmarkConfig, self).__init__(**kwargs)


class JeeNeetBenchmark(datasets.GeneratorBasedBuilder):
    """JEE/NEET LLM Benchmark Dataset."""

    VERSION = datasets.Version("1.0.0")  # Start with version 1.0.0

    BUILDER_CONFIGS = [
        JeeNeetBenchmarkConfig(
            name="default",
            version=VERSION,
            description="Default config for JEE/NEET Benchmark",
            images_dir="images",  # Default images directory
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "question_id": datasets.Value("string"),
                "exam_name": datasets.Value("string"),
                "exam_year": datasets.Value("int32"),
                "exam_code": datasets.Value("string"),  # Default is supplied if missing in the source metadata.
                "subject": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "correct_answer": datasets.Value("string"),  # Stored as a JSON-encoded string.
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
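
    # Note: "correct_answer" is stored as a JSON-encoded string so that single answers and
    # multi-answer lists share one string column. A consumer would decode it with, e.g.:
    #     answers = json.loads(example["correct_answer"])
    # ("example" here is a hypothetical row returned by datasets.load_dataset.)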

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Paths to the files within the Hugging Face dataset repository.
        # Assumes 'images.tar.gz' sits at the repository root and 'metadata.jsonl' lives in 'data/'.
        repo_metadata_path = os.path.join("data", "metadata.jsonl")
        repo_images_archive_path = "images.tar.gz"  # At the root of the repository

        # Force download and extraction for the current run.
        # dl_manager.download_config is an instance of datasets.DownloadConfig.
        dl_manager.download_config.force_download = True
        dl_manager.download_config.force_extract = True  # If redownloading, re-extraction is also desired.

        try:
            # Download the metadata file and download + extract the images archive.
            downloaded_files = dl_manager.download_and_extract(
                {
                    "metadata_file": repo_metadata_path,
                    "images_archive": repo_images_archive_path,
                }
            )
        except Exception as e:
            # Log a specific error before re-raising so the failure is easy to diagnose.
            logging.error(
                f"Failed to download/extract dataset files. "
                f"Metadata path in repo: '{repo_metadata_path}', "
                f"images archive path in repo: '{repo_images_archive_path}'. Error: {e}"
            )
            raise

        metadata_path = downloaded_files["metadata_file"]
        # images_extracted_root is the directory into which dl_manager extracted images.tar.gz.
        images_extracted_root = downloaded_files["images_archive"]
        logging.info(f"Metadata file successfully downloaded to: {metadata_path}")
        logging.info(f"Images archive successfully extracted to: {images_extracted_root}")

        # Verify that the essential files/directories exist after download/extraction.
        if not os.path.exists(metadata_path):
            error_msg = (
                f"Metadata file not found at expected local path after download: {metadata_path}. "
                f"Check repository path '{repo_metadata_path}'."
            )
            logging.error(error_msg)
            raise FileNotFoundError(error_msg)
        if not os.path.isdir(images_extracted_root):
            error_msg = (
                f"Images archive was not extracted to a valid directory: {images_extracted_root}. "
                f"Check repository path '{repo_images_archive_path}' and archive integrity."
            )
            logging.error(error_msg)
            raise FileNotFoundError(error_msg)

        # The image_base_dir for _generate_examples is the root of the extracted archive.
        # Paths in metadata.jsonl (e.g., "images/NEET_2024_T3/file.png") are assumed to be
        # relative to this extracted root.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "metadata_filepath": metadata_path,
                    "image_base_dir": images_extracted_root,
                },
            ),
        ]

    def _generate_examples(self, metadata_filepath, image_base_dir):
        """Yields examples."""
        logging.info(f"Generating examples from metadata: {metadata_filepath}")
        logging.info(f"Using image base directory: {image_base_dir}")

        with open(metadata_filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                try:
                    row = json.loads(line)
                except json.JSONDecodeError as e:
                    logging.error(f"Error decoding JSON on line {idx + 1} in {metadata_filepath}: {e}")
                    continue  # Skip malformed lines.

                # image_path_from_metadata is e.g. "images/NEET_2024_T3/file.png"; it is assumed
                # to be relative to the root of the extracted image archive (image_base_dir).
                image_path_from_metadata = row.get("image_path")
                if not image_path_from_metadata:
                    logging.warning(
                        f"Missing 'image_path' in metadata on line {idx + 1} of {metadata_filepath}. Skipping."
                    )
                    continue

                # Construct the full path to the image file.
                image_path_full = os.path.join(image_base_dir, image_path_from_metadata)
                if not os.path.exists(image_path_full):
                    logging.warning(
                        f"Image file not found at {image_path_full} "
                        f"(referenced on line {idx + 1} of {metadata_filepath}). Skipping."
                    )
                    continue

                yield idx, {
                    "image": image_path_full,  # Full path; datasets.Image() loads the file.
                    "question_id": row.get("question_id", ""),
                    "exam_name": row.get("exam_name", ""),
                    "exam_year": row.get("exam_year", -1),  # Default to -1 if missing.
                    "exam_code": row.get("exam_code", "N/A"),  # Default to "N/A" if missing.
                    "subject": row.get("subject", ""),
                    "question_type": row.get("question_type", ""),
                    "correct_answer": json.dumps(row.get("correct_answer", [])),
                }
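

# A minimal local smoke test (illustrative sketch; the script path below is hypothetical,
# adjust it to wherever this file lives in your checkout):
#
#     import datasets
#     ds = datasets.load_dataset("./jee_neet_benchmark.py", split="test")
#     print(ds[0]["question_id"], ds[0]["subject"])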