MeriDK committed on
Commit
9c3c821
·
1 Parent(s): d9e8f74

Updated loading logic

Browse files
Files changed (1) hide show
  1. AstroM3Dataset.py +100 -26
AstroM3Dataset.py CHANGED
@@ -1,4 +1,3 @@
1
- import os
2
  from io import BytesIO
3
  import datasets
4
  import pandas as pd
@@ -9,14 +8,16 @@ from astropy.io import fits
9
  from .utils import ParallelZipFile
10
 
11
  _DESCRIPTION = (
12
- "AstroM3 is a time-series astronomy dataset containing photometry, spectra, "
13
- "and metadata features for variable stars. The dataset includes multiple "
14
- "subsets (full, sub10, sub25, sub50) and supports different random seeds (42, 66, 0, 12, 123). "
15
- "Each sample consists of:\n"
16
- "- **Photometry**: Light curve data of shape `(N, 3)` (time, flux, flux_error).\n"
17
- "- **Spectra**: Spectral observations of shape `(M, 3)` (wavelength, flux, flux_error).\n"
18
- "- **Metadata**: Auxiliary features of shape `(25,)`.\n"
19
- "- **Label**: The class name as a string."
 
 
20
  )
21
 
22
  _HOMEPAGE = "https://huggingface.co/datasets/AstroM3"
@@ -25,33 +26,69 @@ _URL = "https://huggingface.co/datasets/MeriDK/AstroM3Dataset/resolve/main"
25
  _VERSION = datasets.Version("1.0.0")
26
 
27
  _CITATION = """
28
- @article{AstroM3,
29
- title={AstroM3: A Multi-Modal Astronomy Dataset},
30
- author={Your Name},
31
- year={2025},
32
- journal={AstroML Conference}
33
  }
34
  """
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  class AstroM3Dataset(datasets.GeneratorBasedBuilder):
38
- """Hugging Face dataset for AstroM3 with configurable subsets and seeds."""
39
 
 
40
  DEFAULT_CONFIG_NAME = "full_42"
 
 
41
  BUILDER_CONFIGS = [
42
- datasets.BuilderConfig(name=f"{sub}_{seed}", version=_VERSION, data_dir=None)
43
  for sub in ["full", "sub10", "sub25", "sub50"]
44
  for seed in [42, 66, 0, 12, 123]
 
45
  ]
46
 
47
  def _info(self):
 
 
48
  return datasets.DatasetInfo(
49
  description=_DESCRIPTION,
50
  features=datasets.Features(
51
  {
52
- "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
53
- "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
54
- "metadata": datasets.Sequence(datasets.Value("float32"), length=38),
 
 
 
55
  "label": datasets.Value("string"),
56
  }
57
  ),
@@ -61,13 +98,17 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
61
  )
62
 
63
  def _get_photometry(self, file_name):
 
 
64
  csv = BytesIO()
65
- file_name = file_name.replace(' ', '')
66
  data_path = f'vardb_files/{file_name}.dat'
67
 
 
68
  csv.write(self.reader_v.read(data_path))
69
  csv.seek(0)
70
 
 
71
  lc = pd.read_csv(csv, sep=r'\s+', skiprows=2, names=['HJD', 'MAG', 'MAG_ERR', 'FLUX', 'FLUX_ERR'],
72
  dtype={'HJD': float, 'MAG': float, 'MAG_ERR': float, 'FLUX': float, 'FLUX_ERR': float})
73
 
@@ -75,6 +116,8 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
75
 
76
  @staticmethod
77
  def _get_spectra(file_name):
 
 
78
  hdulist = fits.open(file_name)
79
  len_list = len(hdulist)
80
 
@@ -100,11 +143,32 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
100
 
101
  return np.vstack((wavelength, specflux, ivar)).T
102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  def _split_generators(self, dl_manager):
104
- """Returns SplitGenerators for train, val, and test."""
105
 
106
  # Get subset and seed info from the name
107
- sub, seed = self.config.name.split("_")
 
108
 
109
  # Load the splits and info files
110
  urls = {
@@ -115,7 +179,7 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
115
  }
116
  extracted_path = dl_manager.download(urls)
117
 
118
- # Load all spectra files
119
  spectra_urls = {}
120
 
121
  for split in ("train", "val", "test"):
@@ -125,7 +189,7 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
125
 
126
  spectra_files = dl_manager.download(spectra_urls)
127
 
128
- # Load photometry and init reader
129
  photometry_path = dl_manager.download(f"photometry.zip")
130
  self.reader_v = ParallelZipFile(photometry_path)
131
 
@@ -151,13 +215,20 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
151
  ]
152
 
153
  def _generate_examples(self, csv_path, info_path, spectra_files, split):
154
- """Yields examples from a CSV file containing photometry, spectra, metadata, and labels."""
155
 
156
  df = pd.read_csv(csv_path)
157
 
158
  with open(info_path) as f:
159
  info = json.loads(f.read())
160
 
 
 
 
 
 
 
 
161
  for idx, row in df.iterrows():
162
  photometry = self._get_photometry(row["name"])
163
  spectra = self._get_spectra(spectra_files[row["spec_filename"]])
@@ -165,6 +236,9 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
165
  yield idx, {
166
  "photometry": photometry,
167
  "spectra": spectra,
168
- "metadata": row[info["all_cols"]],
 
 
 
169
  "label": row["target"],
170
  }
 
 
1
  from io import BytesIO
2
  import datasets
3
  import pandas as pd
 
8
  from .utils import ParallelZipFile
9
 
10
# Human-readable dataset description; passed to datasets.DatasetInfo in _info().
_DESCRIPTION = (
    "AstroM3 is a multi-modal time-series astronomy dataset containing photometry, spectra, "
    "and metadata features for variable stars. The dataset consists of multiple subsets "
    "('full', 'sub10', 'sub25', 'sub50') and supports different random seeds (42, 66, 0, 12, 123). "
    "\n\nEach sample includes:\n"
    "- **Photometry**: Time-series light curve data with shape `(N, 3)` representing time, flux, "
    "and flux uncertainty.\n"
    "- **Spectra**: Spectral observations with shape `(M, 3)` containing wavelength, flux, and flux uncertainty.\n"
    "- **Metadata**: Auxiliary astrophysical and photometric parameters (e.g., magnitudes, parallax, motion data) "
    "stored as a dictionary.\n"
    "- **Label**: The classification of the star as a string."
)
22
 
23
  _HOMEPAGE = "https://huggingface.co/datasets/AstroM3"
 
26
  _VERSION = datasets.Version("1.0.0")
27
 
28
  _CITATION = """
29
+ @article{rizhko2024astrom,
30
+ title={AstroM $\^{} 3$: A self-supervised multimodal model for astronomy},
31
+ author={Rizhko, Mariia and Bloom, Joshua S},
32
+ journal={arXiv preprint arXiv:2411.08842},
33
+ year={2024}
34
  }
35
  """
36
 
37
# Feature columns derived from the photometric light curves.
_PHOTO_COLS = ['amplitude', 'period', 'lksl_statistic', 'rfr_score']
# Auxiliary metadata columns — presumably V-band, Gaia, 2MASS (j/h/k) and WISE
# (w1..w4) magnitudes plus astrometry; TODO confirm survey provenance.
_METADATA_COLS = [
    'mean_vmag', 'phot_g_mean_mag', 'e_phot_g_mean_mag', 'phot_bp_mean_mag', 'e_phot_bp_mean_mag', 'phot_rp_mean_mag',
    'e_phot_rp_mean_mag', 'bp_rp', 'parallax', 'parallax_error', 'parallax_over_error', 'pmra', 'pmra_error', 'pmdec',
    'pmdec_error', 'j_mag', 'e_j_mag', 'h_mag', 'e_h_mag', 'k_mag', 'e_k_mag', 'w1_mag', 'e_w1_mag',
    'w2_mag', 'e_w2_mag', 'w3_mag', 'w4_mag', 'j_k', 'w1_w2', 'w3_w4', 'pm', 'ruwe', 'l', 'b'
]
# Union of photometric and metadata columns (the set that gets normalized).
_ALL_COLS = _PHOTO_COLS + _METADATA_COLS
# Transformation kind -> columns it applies to (consumed by transform()):
#   "abs": magnitude columns shifted by -10 + 5*log10(parallax)
#   "cos"/"sin": angular columns (degrees) mapped through cos/sin
#   "log": base-10 logarithm (the period)
_METADATA_FUNC = {
    "abs": [
        "mean_vmag",
        "phot_g_mean_mag",
        "phot_bp_mean_mag",
        "phot_rp_mean_mag",
        "j_mag",
        "h_mag",
        "k_mag",
        "w1_mag",
        "w2_mag",
        "w3_mag",
        "w4_mag",
    ],
    "cos": ["l"],
    "sin": ["b"],
    "log": ["period"]
}
63
+
64
 
65
class AstroM3Dataset(datasets.GeneratorBasedBuilder):
    """Hugging Face dataset for AstroM3, a multi-modal variable star dataset."""

    # Default configuration used when no config name is specified.
    DEFAULT_CONFIG_NAME = "full_42"

    # One config per (subset, seed, normalization) combination. Names look like
    # "<subset>_<seed>" or "<subset>_<seed>_norm"; the "_norm" suffix selects
    # the variant whose metadata columns are transformed and z-score normalized.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"{sub}_{seed}{norm}", version=_VERSION)
        for sub in ["full", "sub10", "sub25", "sub50"]
        for seed in [42, 66, 0, 12, 123]
        for norm in ["", "_norm"]
    ]
78
 
79
  def _info(self):
80
+ """Defines the dataset schema, including features and metadata."""
81
+
82
  return datasets.DatasetInfo(
83
  description=_DESCRIPTION,
84
  features=datasets.Features(
85
  {
86
+ "photometry": datasets.Array2D(shape=(None, 3), dtype="float32"),
87
+ "spectra": datasets.Array2D(shape=(None, 3), dtype="float32"),
88
+ "metadata": {
89
+ "meta_cols": {el: datasets.Value("float32") for el in _METADATA_COLS},
90
+ "photo_cols": {el: datasets.Value("float32") for el in _PHOTO_COLS},
91
+ },
92
  "label": datasets.Value("string"),
93
  }
94
  ),
 
98
  )
99
 
100
  def _get_photometry(self, file_name):
101
+ """Loads photometric light curve data from a compressed file."""
102
+
103
  csv = BytesIO()
104
+ file_name = file_name.replace(' ', '') # Ensure filenames are correctly formatted
105
  data_path = f'vardb_files/{file_name}.dat'
106
 
107
+ # Read the photometry file from the compressed ZIP
108
  csv.write(self.reader_v.read(data_path))
109
  csv.seek(0)
110
 
111
+ # Read light curve data
112
  lc = pd.read_csv(csv, sep=r'\s+', skiprows=2, names=['HJD', 'MAG', 'MAG_ERR', 'FLUX', 'FLUX_ERR'],
113
  dtype={'HJD': float, 'MAG': float, 'MAG_ERR': float, 'FLUX': float, 'FLUX_ERR': float})
114
 
 
116
 
117
  @staticmethod
118
  def _get_spectra(file_name):
119
+ """Loads spectral data from a FITS file."""
120
+
121
  hdulist = fits.open(file_name)
122
  len_list = len(hdulist)
123
 
 
143
 
144
  return np.vstack((wavelength, specflux, ivar)).T
145
 
146
@staticmethod
def transform(df):
    """Apply the metadata transformations declared in ``_METADATA_FUNC`` in place.

    For each transformation kind the listed columns are rewritten:
      - "abs": magnitude columns shifted by ``-10 + 5 * log10(parallax)`` (a
        distance-modulus-style correction); non-positive parallaxes are
        replaced by 1 so the log term contributes 0 instead of NaN/inf.
      - "cos" / "sin": cosine/sine of the column value interpreted as degrees.
      - "log": base-10 logarithm of the column.

    Mutates ``df`` in place and returns None.
    """
    # Parallax with non-positive entries clamped to 1 (log10(1) == 0). Safe to
    # hoist: no transformation rewrites the "parallax" column itself.
    clamped_parallax = np.where(df["parallax"] <= 0, 1, df["parallax"])

    # Dispatch table: transformation kind -> column-wise function.
    apply_by_kind = {
        "abs": lambda series: series - 10 + 5 * np.log10(clamped_parallax),
        "cos": lambda series: np.cos(np.radians(series)),
        "sin": lambda series: np.sin(np.radians(series)),
        "log": lambda series: np.log10(series),
    }

    for kind, columns in _METADATA_FUNC.items():
        op = apply_by_kind.get(kind)
        if op is None:
            # Unknown kinds fall through silently, matching the original
            # if/elif chain.
            continue
        for column in columns:
            df[column] = op(df[column])
166
  def _split_generators(self, dl_manager):
167
+ """Defines dataset splits and downloads required files."""
168
 
169
  # Get subset and seed info from the name
170
+ name = self.config.name.split("_")
171
+ sub, seed = name[0], name[1]
172
 
173
  # Load the splits and info files
174
  urls = {
 
179
  }
180
  extracted_path = dl_manager.download(urls)
181
 
182
+ # Download all spectra files
183
  spectra_urls = {}
184
 
185
  for split in ("train", "val", "test"):
 
189
 
190
  spectra_files = dl_manager.download(spectra_urls)
191
 
192
+ # Download photometry data and initialize ZIP reader
193
  photometry_path = dl_manager.download(f"photometry.zip")
194
  self.reader_v = ParallelZipFile(photometry_path)
195
 
 
215
  ]
216
 
217
  def _generate_examples(self, csv_path, info_path, spectra_files, split):
218
+ """Yields individual dataset examples."""
219
 
220
  df = pd.read_csv(csv_path)
221
 
222
  with open(info_path) as f:
223
  info = json.loads(f.read())
224
 
225
+ if "norm" in self.config.name:
226
+ # Apply metadata transformations
227
+ self.transform(df)
228
+
229
+ # Normalize using precomputed mean and standard deviation
230
+ df[_ALL_COLS] = (df[_ALL_COLS] - info["mean"]) / info["std"]
231
+
232
  for idx, row in df.iterrows():
233
  photometry = self._get_photometry(row["name"])
234
  spectra = self._get_spectra(spectra_files[row["spec_filename"]])
 
236
  yield idx, {
237
  "photometry": photometry,
238
  "spectra": spectra,
239
+ "metadata": {
240
+ "meta_cols": {el: row[el] for el in _METADATA_COLS},
241
+ "photo_cols": {el: row[el] for el in _PHOTO_COLS},
242
+ },
243
  "label": row["target"],
244
  }