import json
import os
from io import BytesIO
from pathlib import Path

import datasets
import numpy as np
import pandas as pd
from astropy.io import fits

from utils.parallelzipfile import ParallelZipFile as ZipFile
|
_DESCRIPTION = (
    "AstroM3 is a time-series astronomy dataset containing photometry, spectra, "
    "and metadata features for variable stars. The dataset includes multiple "
    "subsets (full, sub10, sub25, sub50) and supports different random seeds (42, 66, 0, 12, 123). "
    "Each sample consists of:\n"
    "- **Photometry**: Light curve data of shape `(N, 3)` (time, flux, flux_error).\n"
    "- **Spectra**: Spectral observations of shape `(M, 3)` (wavelength, flux, flux_error).\n"
    "- **Metadata**: Auxiliary features of shape `(25,)`.\n"
    "- **Label**: The class name as a string."
)
|
_HOMEPAGE = "https://huggingface.co/datasets/AstroM3"
_LICENSE = "CC BY 4.0"
_URL = "https://huggingface.co/datasets/AstroM3"
_VERSION = datasets.Version("1.0.0")
|
_CITATION = """
@article{AstroM3,
    title={AstroM3: A Multi-Modal Astronomy Dataset},
    author={Your Name},
    year={2025},
    journal={AstroML Conference}
}
"""
|
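# Usage sketch (the repository path below is illustrative, not confirmed by this
# file; `trust_remote_code=True` is required for script-based datasets in recent
# versions of `datasets`):
#
#     from datasets import load_dataset
#     ds = load_dataset("AstroM3", name="sub10_42", trust_remote_code=True)
#     sample = ds["train"][0]  # keys: id, photometry, spectra, metadata, label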
|
class AstroM3Dataset(datasets.GeneratorBasedBuilder):
    """Hugging Face dataset builder for AstroM3 with configurable subsets and seeds."""

    DEFAULT_CONFIG_NAME = "full_42"

    # One config per (subset, seed) pair, e.g. "full_42" or "sub25_123".
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"{sub}_{seed}", version=_VERSION)
        for sub in ["full", "sub10", "sub25", "sub50"]
        for seed in [42, 66, 0, 12, 123]
    ]
|
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fall back to the current working directory when no data_dir is given.
        if getattr(self.config, "data_dir", None) is None:
            self.config.data_dir = Path(os.getcwd()).resolve()
        print(f"Using dataset location: {self.config.data_dir}")
|
    def _info(self):
        """Defines the feature schema, supervised keys, homepage, license, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    # Variable-length sequence of (time, flux, flux_error) triplets.
                    "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
                    # Variable-length sequence of (wavelength, flux, flux_error) triplets.
                    "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
                    # Fixed-length vector of 25 auxiliary features.
                    "metadata": datasets.Sequence(datasets.Value("float32"), length=25),
                    "label": datasets.Value("string"),
                }
            ),
            supervised_keys=("photometry", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, val, and test splits."""
        self.config.data_dir = Path(self.config.data_dir)
        sub, seed = self.config.name.split("_")
        data_root = self.config.data_dir / "splits" / sub / seed
        info_path = data_root / "info.json"

        if not info_path.exists():
            raise FileNotFoundError(f"Missing info.json file: {info_path}")

        with open(info_path, "r") as f:
            self.dataset_info = json.load(f)

        # Zip archive holding the raw V-band light curves read in _get_photometry.
        self.reader_v = ZipFile(self.config.data_dir / "asassnvarlc_vband_complete.zip")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_root / "train.csv"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_root / "val.csv"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": data_root / "test.csv"}
            ),
        ]
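
    # Expected on-disk layout (inferred from the paths used above; names in
    # angle brackets are placeholders):
    #
    #     <data_dir>/
    #         asassnvarlc_vband_complete.zip
    #         splits/<sub>/<seed>/
    #             info.json
    #             train.csv
    #             val.csv
    #             test.csv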
|
    def _get_photometry(self, file_name):
        """Reads one V-band light curve from the zip and returns an (N, 3) array."""
        file_name = file_name.replace(' ', '')
        data_path = f'vardb_files/{file_name}.dat'

        # Wrap the raw bytes so pandas can parse them directly.
        csv = BytesIO(self.reader_v.read(data_path))

        lc = pd.read_csv(
            csv,
            sep=r'\s+',
            skiprows=2,
            names=['HJD', 'MAG', 'MAG_ERR', 'FLUX', 'FLUX_ERR'],
            dtype={'HJD': float, 'MAG': float, 'MAG_ERR': float, 'FLUX': float, 'FLUX_ERR': float},
        )

        return lc[['HJD', 'FLUX', 'FLUX_ERR']].values
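
    # Illustrative .dat contents (values are hypothetical; per the parser above,
    # two header rows precede five whitespace-separated columns):
    #
    #     2458000.123  12.34  0.01  1023.4  9.8
    #     2458001.456  12.31  0.01  1051.2  9.9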
|
    @staticmethod
    def _get_spectra(file_name):
        """Reads a spectrum from a FITS file and returns an (M, 3) wavelength/flux/ivar array."""
        # The context manager closes the file on every path (the original only
        # closed it in the single-HDU branch).
        with fits.open(file_name) as hdulist:
            if len(hdulist) == 1:
                # Flux and inverse variance are stored as image rows; the wavelength
                # grid is log-linear: wavelength[i] = 10 ** (COEFF0 + i * COEFF1).
                head = hdulist[0].header
                scidata = hdulist[0].data
                coeff0 = head['COEFF0']
                coeff1 = head['COEFF1']
                pixel_num = head['NAXIS1']
                specflux = scidata[0]
                ivar = scidata[1]
                wavelength = np.power(10, coeff0 + np.arange(pixel_num) * coeff1)
            elif len(hdulist) == 2:
                # Binary-table layout: the first row stores (flux, ivar, wavelength).
                scidata = hdulist[1].data
                wavelength = scidata[0][2]
                ivar = scidata[0][1]
                specflux = scidata[0][0]
            else:
                raise ValueError(f'Unexpected HDU count: {len(hdulist)} (expected 1 or 2)')

        return np.vstack((wavelength, specflux, ivar)).T
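
    # Caveat: the third spectra column here is inverse variance (ivar), whereas
    # the feature description labels it flux_error; a conversion sketch (not
    # applied here) would be err = 1 / np.sqrt(ivar) wherever ivar > 0.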
|
    def _generate_examples(self, filepath):
        """Yields examples from a CSV file containing photometry, spectra, metadata, and labels."""
        if not filepath.exists():
            raise FileNotFoundError(f"Missing dataset file: {filepath}")

        df = pd.read_csv(filepath)

        for idx, row in df.iterrows():
            photometry = self._get_photometry(row['name'])
            # Placeholders: spectra and metadata are zero-filled here; `_get_spectra`
            # exists but is not wired up in this version of the script.
            spectra = np.zeros((200, 3))
            metadata = np.zeros(25)

            yield idx, {
                "id": str(row["id"]),
                "photometry": photometry.tolist(),
                "spectra": spectra.tolist(),
                "metadata": metadata.tolist(),
                "label": row["target"],
            }
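

# Local smoke-test sketch (assumes the split CSVs and the photometry zip are
# already present under the current working directory):
if __name__ == "__main__":
    builder = AstroM3Dataset(config_name="full_42")
    builder.download_and_prepare()
    ds = builder.as_dataset(split="train")
    print(ds[0]["label"], len(ds[0]["photometry"]))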