MeriDK committed on
Commit 17b9b62 · 1 Parent(s): 251c917

Added script to preprocess data

Files changed (2)
  1. AstroM3Dataset.py +0 -1
  2. preprocess.py +244 -0
AstroM3Dataset.py CHANGED
@@ -99,7 +99,6 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
 
     def _get_photometry(self, file_name):
         """Loads photometric light curve data from a compressed file."""
-
         csv = BytesIO()
         file_name = file_name.replace(' ', '')  # Ensure filenames are correctly formatted
         data_path = f'vardb_files/{file_name}.dat'
preprocess.py ADDED
@@ -0,0 +1,244 @@
+from collections import defaultdict
+
+import datasets
+from datasets import load_dataset
+import numpy as np
+from scipy import stats
+
+METADATA_FUNC = {
+    "abs": [
+        "mean_vmag",
+        "phot_g_mean_mag",
+        "phot_bp_mean_mag",
+        "phot_rp_mean_mag",
+        "j_mag",
+        "h_mag",
+        "k_mag",
+        "w1_mag",
+        "w2_mag",
+        "w3_mag",
+        "w4_mag",
+    ],
+    "cos": ["l"],
+    "sin": ["b"],
+    "log": ["period"]
+}
+
+
+def preprocess_spectra(example):
+    """
+    Preprocess spectral data. Steps:
+    - Interpolate flux and flux error to a fixed wavelength grid (3850 to 9000 Å).
+    - Normalize flux using mean and median absolute deviation (MAD).
+    - Append MAD as an auxiliary feature.
+    """
+    spectra = example['spectra']
+    wavelengths = spectra[:, 0]
+    flux = spectra[:, 1]
+    flux_err = spectra[:, 2]
+
+    # Interpolate flux and flux error onto a fixed grid
+    new_wavelengths = np.arange(3850, 9000, 2)
+    flux = np.interp(new_wavelengths, wavelengths, flux)
+    flux_err = np.interp(new_wavelengths, wavelengths, flux_err)
+
+    # Normalize flux and flux error
+    mean = np.mean(flux)
+    mad = stats.median_abs_deviation(flux[flux != 0])
+
+    flux = (flux - mean) / mad
+    flux_err = flux_err / mad
+    aux_values = np.full_like(flux, np.log10(mad))  # Store MAD as an auxiliary feature
+
+    # Stack processed data into a single array
+    spectra = np.vstack([flux, flux_err, aux_values])
+    example['spectra'] = spectra
+
+    return example
+
+
+def preprocess_lc(example):
+    """
+    Preprocess photometry (light curve) data. Steps:
+    - Remove duplicate time entries.
+    - Sort by Heliocentric Julian Date (HJD).
+    - Normalize flux and flux error using mean and median absolute deviation (MAD).
+    - Scale time values between 0 and 1.
+    - Append auxiliary features (log MAD and time span delta_t).
+    """
+    X = example['photometry']
+    aux_values = np.stack(list(example['metadata']['photo_cols'].values()))
+
+    # Remove duplicate entries
+    X = np.unique(X, axis=0)
+
+    # Sort based on HJD
+    sorted_indices = np.argsort(X[:, 0])
+    X = X[sorted_indices]
+
+    # Normalize flux and flux error
+    mean = X[:, 1].mean()
+    mad = stats.median_abs_deviation(X[:, 1])
+    X[:, 1] = (X[:, 1] - mean) / mad
+    X[:, 2] = X[:, 2] / mad
+
+    # Compute delta_t (time span of the light curve in years)
+    delta_t = (X[:, 0].max() - X[:, 0].min()) / 365
+
+    # Scale time from 0 to 1
+    X[:, 0] = (X[:, 0] - X[:, 0].min()) / (X[:, 0].max() - X[:, 0].min())
+
+    # Add MAD and delta_t to auxiliary metadata features
+    aux_values = np.concatenate((aux_values, [np.log10(mad), delta_t]))
+
+    # Add auxiliary features to the sequence
+    aux_values = np.tile(aux_values, (X.shape[0], 1))
+    X = np.concatenate((X, aux_values), axis=-1)
+
+    example['photometry'] = X
+    return example
+
+
+def transform_metadata(example):
+    """
+    Transforms the metadata of an example based on METADATA_FUNC.
+    """
+    metadata = example["metadata"]
+
+    # Process 'abs' transformation on meta_cols:
+    # Note: This transformation uses 'parallax' from meta_cols.
+    for col in METADATA_FUNC["abs"]:
+        if col in metadata["meta_cols"]:
+            # Use np.where to avoid issues when parallax is non-positive.
+            metadata["meta_cols"][col] = (
+                metadata["meta_cols"][col]
+                - 10
+                + 5 * np.log10(np.where(metadata["meta_cols"]["parallax"] <= 0, 1, metadata["meta_cols"]["parallax"]))
+            )
+
+    # Process 'cos' transformation on meta_cols:
+    for col in METADATA_FUNC["cos"]:
+        if col in metadata["meta_cols"]:
+            metadata["meta_cols"][col] = np.cos(np.radians(metadata["meta_cols"][col]))
+
+    # Process 'sin' transformation on meta_cols:
+    for col in METADATA_FUNC["sin"]:
+        if col in metadata["meta_cols"]:
+            metadata["meta_cols"][col] = np.sin(np.radians(metadata["meta_cols"][col]))
+
+    # Process 'log' transformation on photo_cols:
+    for col in METADATA_FUNC["log"]:
+        if col in metadata["photo_cols"]:
+            metadata["photo_cols"][col] = np.log10(metadata["photo_cols"][col])
+
+    # Update the example with the transformed metadata.
+    example["metadata"] = metadata
+    return example
+
+
+def compute_metadata_stats(ds):
+    """
+    Compute the mean and standard deviation for each column in meta_cols and photo_cols.
+    """
+    meta_vals = defaultdict(list)
+    photo_vals = defaultdict(list)
+
+    # Accumulate values for each column
+    for example in ds:
+        meta = example["metadata"]["meta_cols"]
+        photo = example["metadata"]["photo_cols"]
+        for col, value in meta.items():
+            meta_vals[col].append(value)
+        for col, value in photo.items():
+            photo_vals[col].append(value)
+
+    # Compute mean and standard deviation for each column
+    stats = {"meta_cols": {}, "photo_cols": {}}
+    for col, values in meta_vals.items():
+        arr = np.stack(values)
+        stats["meta_cols"][col] = {"mean": arr.mean(), "std": arr.std()}
+    for col, values in photo_vals.items():
+        arr = np.stack(values)
+        stats["photo_cols"][col] = {"mean": arr.mean(), "std": arr.std()}
+
+    return stats
+
+
+def normalize_metadata(example, info):
+    """
+    Normalize metadata values using z-score normalization:
+    (value - mean) / std.
+
+    The 'info' parameter should be a dictionary with computed means and stds for both meta_cols and photo_cols.
+    """
+    metadata = example["metadata"]
+
+    # Normalize meta_cols
+    for col, value in metadata["meta_cols"].items():
+        mean = info["meta_cols"][col]["mean"]
+        std = info["meta_cols"][col]["std"]
+        metadata["meta_cols"][col] = (metadata["meta_cols"][col] - mean) / std
+
+    # Normalize photo_cols
+    for col, value in metadata["photo_cols"].items():
+        mean = info["photo_cols"][col]["mean"]
+        std = info["photo_cols"][col]["std"]
+        metadata["photo_cols"][col] = (metadata["photo_cols"][col] - mean) / std
+
+    example["metadata"] = metadata
+    return example
+
+
+def preprocess_metadata(example):
+    """
+    Extract the values from 'meta_cols' and stack them into a numpy array.
+    """
+    example["metadata"] = np.stack(list(example["metadata"]["meta_cols"].values()))
+    return example
+
+
+def main():
+    """
+    Main function for processing and uploading datasets.
+
+    - Loads each dataset based on subset and random seed.
+    - Applies preprocessing for spectra, photometry, and metadata.
+    - Casts columns to appropriate feature types.
+    - Pushes the processed dataset to Hugging Face Hub.
+    """
+    for sub in ["sub10", "sub25", "sub50", "full"]:
+        for seed in [42, 66, 0, 12, 123]:
+            name = f"{sub}_{seed}"
+            print(f"Processing: {name}")
+
+            # Load dataset from Hugging Face Hub
+            ds = load_dataset('MeriDK/AstroM3Dataset', name=name, trust_remote_code=True, num_proc=16)
+            ds = ds.with_format('numpy')
+
+            # Transform and normalize metadata
+            ds = ds.map(transform_metadata, num_proc=16)
+            info = compute_metadata_stats(ds['train'])
+            ds = ds.map(lambda example: normalize_metadata(example, info))
+
+            # Transform spectra
+            ds = ds.map(preprocess_spectra, num_proc=16)
+            ds = ds.cast_column('spectra', datasets.Array2D(shape=(3, 2575), dtype='float32'))
+
+            # Transform photometry
+            ds = ds.map(preprocess_lc, num_proc=16)
+            ds = ds.cast_column('photometry', datasets.Array2D(shape=(None, 9), dtype='float32'))
+
+            # Stack metadata into one numpy array
+            ds = ds.map(preprocess_metadata, num_proc=16)
+            ds = ds.cast_column('metadata', datasets.Sequence(feature=datasets.Value('float32'), length=34))
+
+            # Change label type
+            ds = ds.cast_column('label', datasets.ClassLabel(
+                names=['DSCT', 'EA', 'EB', 'EW', 'HADS', 'M', 'ROT', 'RRAB', 'RRC', 'SR']))
+
+            # Upload processed dataset to Hugging Face Hub
+            ds.push_to_hub('MeriDK/AstroM3Processed', config_name=name)
+
+
+if __name__ == '__main__':
+    main()
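
For reference, a standalone sketch of the spectra step on synthetic data (illustrative only; the wavelength sampling and noise level here are made up). The fixed grid np.arange(3850, 9000, 2) contains 2575 points, which is why the script later casts the spectra column to Array2D(shape=(3, 2575)).

import numpy as np
from scipy import stats

# Synthetic spectrum with wavelength, flux, flux_err columns, as preprocess_spectra expects.
rng = np.random.default_rng(0)
wavelengths = np.linspace(3800, 9100, 4000)
flux = 1.0 + 0.1 * rng.standard_normal(4000)
flux_err = np.full(4000, 0.1)

# Interpolate onto the fixed 2575-point grid used by the script
new_wavelengths = np.arange(3850, 9000, 2)
flux_i = np.interp(new_wavelengths, wavelengths, flux)
err_i = np.interp(new_wavelengths, wavelengths, flux_err)

# MAD-normalize and append the log-MAD auxiliary channel
mad = stats.median_abs_deviation(flux_i[flux_i != 0])
out = np.vstack([
    (flux_i - flux_i.mean()) / mad,
    err_i / mad,
    np.full_like(flux_i, np.log10(mad)),
])
print(out.shape)  # (3, 2575)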
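
A similar sketch for the light-curve step, again on synthetic values for the three raw photometry columns (HJD, magnitude, magnitude error). The full preprocess_lc additionally tiles the photo_cols values plus log MAD and delta_t onto every row, which is what produces the 9 columns expected by the Array2D(shape=(None, 9)) cast.

import numpy as np
from scipy import stats

# Synthetic light curve: HJD, magnitude, magnitude error.
rng = np.random.default_rng(1)
hjd = rng.uniform(2458000, 2459000, 200)
mag = 12.0 + 0.3 * np.sin(hjd / 5.0) + 0.02 * rng.standard_normal(200)
err = np.full(200, 0.02)
X = np.column_stack([hjd, mag, err])

X = np.unique(X, axis=0)                    # drop duplicate rows
X = X[np.argsort(X[:, 0])]                  # sort by HJD

mad = stats.median_abs_deviation(X[:, 1])
X[:, 1] = (X[:, 1] - X[:, 1].mean()) / mad  # centre and scale magnitudes
X[:, 2] = X[:, 2] / mad                     # scale errors by the same MAD

delta_t = (X[:, 0].max() - X[:, 0].min()) / 365                        # time span in years
X[:, 0] = (X[:, 0] - X[:, 0].min()) / (X[:, 0].max() - X[:, 0].min())  # rescale time to [0, 1]
print(X.shape, round(delta_t, 2))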
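
The 'abs' group in METADATA_FUNC applies the distance-modulus conversion from apparent to absolute magnitude, M = m - 10 + 5*log10(parallax), which is the standard relation when the parallax is expressed in milliarcseconds (an assumption here; the committed code does not state the unit). Non-positive parallaxes are clamped to 1 before the logarithm, exactly as in transform_metadata. A quick numeric check:

import numpy as np

def absolute_mag(m, parallax_mas):
    # M = m - 5*log10(d / 10 pc) with d = 1000 / parallax_mas,
    # i.e. M = m - 10 + 5*log10(parallax_mas)
    p = np.where(parallax_mas <= 0, 1, parallax_mas)  # same guard as transform_metadata
    return m - 10 + 5 * np.log10(p)

print(absolute_mag(10.0, 10.0))  # 10 mas -> 100 pc, so M = 10 - 5 = 5.0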
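
Once pushed, a processed config can be loaded back by name; config names follow the f"{sub}_{seed}" pattern from main(), e.g. 'full_42'. A minimal consumer sketch, assuming the upload completed and the original split layout (including a 'train' split) is preserved:

from datasets import load_dataset

# Load one of the configs pushed by main(); names follow the f"{sub}_{seed}" pattern.
ds = load_dataset('MeriDK/AstroM3Processed', name='full_42')
ds = ds.with_format('numpy')

example = ds['train'][0]
print(example['spectra'].shape)     # (3, 2575) after the Array2D cast
print(example['photometry'].shape)  # (number of observations, 9)
print(example['metadata'].shape)    # (34,)
print(example['label'])             # integer index into the ClassLabel names

The printed shapes follow directly from the casts applied in main().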