from io import BytesIO import datasets import pandas as pd import numpy as np import json from astropy.io import fits from .utils import ParallelZipFile _DESCRIPTION = ( "AstroM3 is a multi-modal time-series astronomy dataset containing photometry, spectra, " "and metadata features for variable stars. The dataset consists of multiple subsets " "('full', 'sub10', 'sub25', 'sub50') and supports different random seeds (42, 66, 0, 12, 123). " "\n\nEach sample includes:\n" "- **Photometry**: Time-series light curve data with shape `(N, 3)` representing time, flux, " "and flux uncertainty.\n" "- **Spectra**: Spectral observations with shape `(M, 3)` containing wavelength, flux, and flux uncertainty.\n" "- **Metadata**: Auxiliary astrophysical and photometric parameters (e.g., magnitudes, parallax, motion data) " "stored as a dictionary.\n" "- **Label**: The classification of the star as a string." ) _HOMEPAGE = "https://huggingface.co/datasets/AstroM3" _LICENSE = "CC BY 4.0" _URL = "https://huggingface.co/datasets/MeriDK/AstroM3Dataset/resolve/main" _VERSION = datasets.Version("1.0.0") _CITATION = """ @article{rizhko2024astrom, title={AstroM $\^{} 3$: A self-supervised multimodal model for astronomy}, author={Rizhko, Mariia and Bloom, Joshua S}, journal={arXiv preprint arXiv:2411.08842}, year={2024} } """ _PHOTO_COLS = ['amplitude', 'period', 'lksl_statistic', 'rfr_score'] _METADATA_COLS = [ 'mean_vmag', 'phot_g_mean_mag', 'e_phot_g_mean_mag', 'phot_bp_mean_mag', 'e_phot_bp_mean_mag', 'phot_rp_mean_mag', 'e_phot_rp_mean_mag', 'bp_rp', 'parallax', 'parallax_error', 'parallax_over_error', 'pmra', 'pmra_error', 'pmdec', 'pmdec_error', 'j_mag', 'e_j_mag', 'h_mag', 'e_h_mag', 'k_mag', 'e_k_mag', 'w1_mag', 'e_w1_mag', 'w2_mag', 'e_w2_mag', 'w3_mag', 'w4_mag', 'j_k', 'w1_w2', 'w3_w4', 'pm', 'ruwe', 'l', 'b' ] _ALL_COLS = _PHOTO_COLS + _METADATA_COLS _METADATA_FUNC = { "abs": [ "mean_vmag", "phot_g_mean_mag", "phot_bp_mean_mag", "phot_rp_mean_mag", "j_mag", "h_mag", "k_mag", "w1_mag", "w2_mag", "w3_mag", "w4_mag", ], "cos": ["l"], "sin": ["b"], "log": ["period"] } class AstroM3Dataset(datasets.GeneratorBasedBuilder): """Hugging Face dataset for AstroM3, a multi-modal variable star dataset.""" # Default configuration (used if no config is specified) DEFAULT_CONFIG_NAME = "full_42" # Define dataset configurations (subsets, seeds, and normalization variants) BUILDER_CONFIGS = [ datasets.BuilderConfig(name=f"{sub}_{seed}{norm}", version=_VERSION) for sub in ["full", "sub10", "sub25", "sub50"] for seed in [42, 66, 0, 12, 123] for norm in ["", "_norm"] ] def _info(self): """Defines the dataset schema, including features and metadata.""" return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)), "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)), "metadata": { "meta_cols": {el: datasets.Value("float32") for el in _METADATA_COLS}, "photo_cols": {el: datasets.Value("float32") for el in _PHOTO_COLS}, }, "label": datasets.Value("string"), } ), homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _get_photometry(self, file_name): """Loads photometric light curve data from a compressed file.""" csv = BytesIO() file_name = file_name.replace(' ', '') # Ensure filenames are correctly formatted data_path = f'vardb_files/{file_name}.dat' # Read the photometry file from the compressed ZIP csv.write(self.reader_v.read(data_path)) csv.seek(0) # Read light curve data lc = pd.read_csv(csv, sep=r'\s+', skiprows=2, names=['HJD', 'MAG', 'MAG_ERR', 'FLUX', 'FLUX_ERR'], dtype={'HJD': float, 'MAG': float, 'MAG_ERR': float, 'FLUX': float, 'FLUX_ERR': float}) return lc[['HJD', 'FLUX', 'FLUX_ERR']].values @staticmethod def _get_spectra(file_name): """Loads spectral data from a FITS file.""" hdulist = fits.open(file_name) len_list = len(hdulist) if len_list == 1: head = hdulist[0].header scidata = hdulist[0].data coeff0 = head['COEFF0'] coeff1 = head['COEFF1'] pixel_num = head['NAXIS1'] specflux = scidata[0,] ivar = scidata[1,] wavelength = np.linspace(0, pixel_num - 1, pixel_num) wavelength = np.power(10, (coeff0 + wavelength * coeff1)) hdulist.close() elif len_list == 2: head = hdulist[0].header scidata = hdulist[1].data wavelength = scidata[0][2] ivar = scidata[0][1] specflux = scidata[0][0] else: raise ValueError(f'Wrong number of fits files. {len_list} should be 1 or 2') return np.vstack((wavelength, specflux, ivar)).T @staticmethod def transform(df): """Applies transformations to metadata.""" for transformation_type, value in _METADATA_FUNC.items(): if transformation_type == "abs": for col in value: df[col] = ( df[col] - 10 + 5 * np.log10(np.where(df["parallax"] <= 0, 1, df["parallax"])) ) elif transformation_type == "cos": for col in value: df[col] = np.cos(np.radians(df[col])) elif transformation_type == "sin": for col in value: df[col] = np.sin(np.radians(df[col])) elif transformation_type == "log": for col in value: df[col] = np.log10(df[col]) def _split_generators(self, dl_manager): """Defines dataset splits and downloads required files.""" # Get subset and seed info from the name name = self.config.name.split("_") sub, seed = name[0], name[1] # Load the splits and info files urls = { "train": f"splits/{sub}/{seed}/train.csv", "val": f"splits/{sub}/{seed}/val.csv", "test": f"splits/{sub}/{seed}/test.csv", "info": f"splits/{sub}/{seed}/info.json", } extracted_path = dl_manager.download(urls) # Download all spectra files spectra_urls = {} for split in ("train", "val", "test"): df = pd.read_csv(extracted_path[split]) for _, row in df.iterrows(): spectra_urls[row["spec_filename"]] = f"spectra/{row['target']}/{row['spec_filename']}" spectra_files = dl_manager.download(spectra_urls) # Download photometry data and initialize ZIP reader photometry_path = dl_manager.download(f"photometry.zip") self.reader_v = ParallelZipFile(photometry_path) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"csv_path": extracted_path["train"], "info_path": extracted_path["info"], "spectra_files": spectra_files, "split": "train"} ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"csv_path": extracted_path["val"], "info_path": extracted_path["info"], "spectra_files": spectra_files, "split": "val"} ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"csv_path": extracted_path["test"], "info_path": extracted_path["info"], "spectra_files": spectra_files, "split": "test"} ), ] def _generate_examples(self, csv_path, info_path, spectra_files, split): """Yields individual dataset examples.""" df = pd.read_csv(csv_path) with open(info_path) as f: info = json.loads(f.read()) if "norm" in self.config.name: # Apply metadata transformations self.transform(df) # Normalize using precomputed mean and standard deviation df[_ALL_COLS] = (df[_ALL_COLS] - info["mean"]) / info["std"] for idx, row in df.iterrows(): photometry = self._get_photometry(row["name"]) spectra = self._get_spectra(spectra_files[row["spec_filename"]]) yield idx, { "photometry": photometry, "spectra": spectra, "metadata": { "meta_cols": {el: row[el] for el in _METADATA_COLS}, "photo_cols": {el: row[el] for el in _PHOTO_COLS}, }, "label": row["target"], }