import json
import os
from io import BytesIO

import datasets
import numpy as np
import pandas as pd
from astropy.io import fits

from utils.parallelzipfile import ParallelZipFile as ZipFile

_DESCRIPTION = (
    "AstroM3 is a time-series astronomy dataset containing photometry, spectra, "
    "and metadata features for variable stars. The dataset includes multiple "
    "subsets (full, sub10, sub25, sub50) and supports different random seeds (42, 66, 0, 12, 123). "
    "Each sample consists of:\n"
    "- **Photometry**: Light curve data of shape `(N, 3)` (time, flux, flux_error).\n"
    "- **Spectra**: Spectral observations of shape `(M, 3)` (wavelength, flux, flux_error).\n"
    "- **Metadata**: Auxiliary features of shape `(25,)`.\n"
    "- **Label**: The class name as a string."
)

_HOMEPAGE = "https://huggingface.co/datasets/AstroM3"
_LICENSE = "CC BY 4.0"
_URL = "https://huggingface.co/datasets/MeriDK/AstroM3Dataset/resolve/main"
_VERSION = datasets.Version("1.0.0")

_CITATION = """
@article{AstroM3,
    title={AstroM3: A Multi-Modal Astronomy Dataset},
    author={Your Name},
    year={2025},
    journal={AstroML Conference}
}
"""

class AstroM3Dataset(datasets.GeneratorBasedBuilder):
    """Hugging Face dataset for AstroM3 with configurable subsets and seeds."""

    DEFAULT_CONFIG_NAME = "full_42"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"{sub}_{seed}", version=_VERSION, data_dir=None)
        for sub in ["full", "sub10", "sub25", "sub50"]
        for seed in [42, 66, 0, 12, 123]
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
                    "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
                    "metadata": datasets.Sequence(datasets.Value("float32"), length=25),
                    "label": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _get_photometry(self, file_name):
        """Reads a light curve from the photometry zip; returns (N, 3) HJD/FLUX/FLUX_ERR values."""
        csv = BytesIO()
        file_name = file_name.replace(' ', '')
        data_path = f'vardb_files/{file_name}.dat'

        # Pull the raw .dat bytes out of the zip archive opened in _split_generators.
        csv.write(self.reader_v.read(data_path))
        csv.seek(0)

        # Skip the two header rows; the rest are whitespace-separated columns.
        lc = pd.read_csv(csv, sep=r'\s+', skiprows=2, names=['HJD', 'MAG', 'MAG_ERR', 'FLUX', 'FLUX_ERR'],
                         dtype={'HJD': float, 'MAG': float, 'MAG_ERR': float, 'FLUX': float, 'FLUX_ERR': float})

        return lc[['HJD', 'FLUX', 'FLUX_ERR']].values

    @staticmethod
    def _get_spectra(file_name):
        """Reads a spectrum from a FITS file; returns (M, 3) wavelength/flux/ivar values."""
        hdulist = fits.open(file_name)
        len_list = len(hdulist)

        if len_list == 1:
            # Single-HDU layout: flux and inverse variance are rows of the image
            # data; the wavelength grid is rebuilt from the log-linear keywords.
            head = hdulist[0].header
            scidata = hdulist[0].data
            coeff0 = head['COEFF0']
            coeff1 = head['COEFF1']
            pixel_num = head['NAXIS1']
            specflux = scidata[0]
            ivar = scidata[1]
            wavelength = np.linspace(0, pixel_num - 1, pixel_num)
            wavelength = np.power(10, (coeff0 + wavelength * coeff1))
        elif len_list == 2:
            # Two-HDU layout: the first table row of the second HDU stores
            # (flux, inverse variance, wavelength) directly.
            scidata = hdulist[1].data
            wavelength = scidata[0][2]
            ivar = scidata[0][1]
            specflux = scidata[0][0]
        else:
            hdulist.close()
            raise ValueError(f'Unexpected number of HDUs in {file_name}: {len_list}, expected 1 or 2')

        hdulist.close()
        return np.vstack((wavelength, specflux, ivar)).T

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, val, and test splits."""
        sub, seed = self.config.name.split("_")

        # Download the split CSVs and the metadata-column info for this subset/seed.
        urls = {
            "train": f"{_URL}/splits/{sub}/{seed}/train.csv",
            "val": f"{_URL}/splits/{sub}/{seed}/val.csv",
            "test": f"{_URL}/splits/{sub}/{seed}/test.csv",
            "info": f"{_URL}/splits/{sub}/{seed}/info.json",
        }
        extracted_path = dl_manager.download_and_extract(urls)

        # Collect the spectra URLs referenced by every split, then download them all.
        spectra_urls = {}
        for split in ["train", "val", "test"]:
            df = pd.read_csv(extracted_path[split])
            for _, row in df.iterrows():
                spectra_urls[row["spec_filename"]] = f"{_URL}/spectra/{split}/{row['target']}/{row['spec_filename']}"

        spectra = dl_manager.download_and_extract(spectra_urls)

        # Photometry ships as a single zip archive; keep a reader open for _get_photometry.
        photometry_path = dl_manager.download(f"{_URL}/photometry.zip")
        self.reader_v = ZipFile(photometry_path)

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "csv_path": extracted_path[split],
                    "info_path": extracted_path["info"],
                    "spectra": spectra,
                    "split": split,
                },
            )
            for split_name, split in [
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "val"),
                (datasets.Split.TEST, "test"),
            ]
        ]

    def _generate_examples(self, csv_path, info_path, spectra, split):
        """Yields examples from a CSV file containing photometry, spectra, metadata, and labels."""
        if not os.path.exists(csv_path):
            raise FileNotFoundError(f"Missing dataset file: {csv_path}")

        if not os.path.exists(info_path):
            raise FileNotFoundError(f"Missing info file: {info_path}")

        df = pd.read_csv(csv_path)

        with open(info_path) as f:
            info = json.load(f)

        for idx, row in df.iterrows():
            photometry = self._get_photometry(row["name"])
            # Look up the downloaded FITS path for this example; use a separate
            # variable so the `spectra` path mapping is not overwritten in the loop.
            spectra_data = self._get_spectra(spectra[row["spec_filename"]])

            yield idx, {
                "photometry": photometry,
                "spectra": spectra_data,
                "metadata": row[info["all_cols"]],
                "label": row["target"],
            }
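
# Consumption sketch (hypothetical variable names; assumes a sample loaded as in
# the usage example near the top of this file). Each field converts cleanly to a
# numpy array with the shapes declared in _info:
#
#   import numpy as np
#   phot = np.asarray(sample["photometry"], dtype=np.float32)  # shape (N, 3)
#   spec = np.asarray(sample["spectra"], dtype=np.float32)     # shape (M, 3)
#   meta = np.asarray(sample["metadata"], dtype=np.float32)    # shape (25,)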