MeriDK committed
Commit 88109f2 · verified · 1 parent: 836a9e1

Add files using upload-large-folder tool

Files changed (4)
  1. .DS_Store +0 -0
  2. .idea/AstroM3Dataset.iml +8 -0
  3. .idea/workspace.xml +12 -0
  4. AstroM3Dataset.py +69 -55
.DS_Store ADDED
Binary file (6.15 kB)
 
.idea/AstroM3Dataset.iml ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="astro (2)" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
.idea/workspace.xml ADDED
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectViewState">
+    <option name="hideEmptyMiddlePackages" value="true" />
+    <option name="showLibraryContents" value="true" />
+  </component>
+  <component name="PropertiesComponent">{
+  "keyToString": {
+    "settings.editor.selected.configurable": "ssh.settings"
+  }
+}</component>
+</project>
AstroM3Dataset.py CHANGED
@@ -1,6 +1,5 @@
 import os
 from io import BytesIO
-from pathlib import Path
 import datasets
 import pandas as pd
 import numpy as np
@@ -22,7 +21,7 @@ _DESCRIPTION = (
 
 _HOMEPAGE = "https://huggingface.co/datasets/AstroM3"
 _LICENSE = "CC BY 4.0"
-_URL = "https://huggingface.co/datasets/AstroM3"
+_URL = "https://huggingface.co/datasets/MeriDK/AstroM3Dataset/resolve/main"
 _VERSION = datasets.Version("1.0.0")
 
 _CITATION = """
@@ -41,68 +40,27 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
     DEFAULT_CONFIG_NAME = "full_42"
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name=f"{sub}_{seed}",
-            version=_VERSION
-        )
+        datasets.BuilderConfig(name=f"{sub}_{seed}", version=_VERSION, data_dir=None)
         for sub in ["full", "sub10", "sub25", "sub50"]
         for seed in [42, 66, 0, 12, 123]
     ]
 
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-        # Auto-detect dataset location: use current working directory
-        if not hasattr(self.config, "data_dir") or self.config.data_dir is None:
-            self.config.data_dir = Path(os.getcwd()).resolve()
-            print(f"Using dataset location: {self.config.data_dir}")
-
     def _info(self):
         return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
-                    "id": datasets.Value("string"),
                    "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
                    "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
                    "metadata": datasets.Sequence(datasets.Value("float32"), length=25),
                    "label": datasets.Value("string"),
                }
            ),
-            supervised_keys=("photometry", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
 
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators for train, val, and test."""
-        self.config.data_dir = Path(self.config.data_dir)
-        sub, seed = self.config.name.split("_")
-        data_root = self.config.data_dir / "splits" / sub / seed
-        info_path = data_root / "info.json"
-
-        if not info_path.exists():
-            raise FileNotFoundError(f"Missing info.json file: {info_path}")
-
-        with open(info_path, "r") as f:
-            self.dataset_info = json.load(f)
-
-        # Init reader for photometry
-        self.reader_v = ZipFile(Path(self.config.data_dir) / 'asassnvarlc_vband_complete.zip')
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_root / "train.csv"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_root / "val.csv"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": data_root / "test.csv"}
-            ),
-        ]
-
     def _get_photometry(self, file_name):
        csv = BytesIO()
        file_name = file_name.replace(' ', '')
@@ -143,22 +101,78 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
 
        return np.vstack((wavelength, specflux, ivar)).T
 
-    def _generate_examples(self, filepath):
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators for train, val, and test."""
+
+        # Get subset and seed info from the name
+        sub, seed = self.config.name.split("_")
+
+        # Load the splits and info files
+        urls = {
+            "train": f"{_URL}/splits/{sub}/{seed}/train.csv",
+            "val": f"{_URL}/splits/{sub}/{seed}/val.csv",
+            "test": f"{_URL}/splits/{sub}/{seed}/test.csv",
+            "info": f"{_URL}/splits/{sub}/{seed}/info.json",
+        }
+        extracted_path = dl_manager.download_and_extract(urls)
+
+        # Load all spectra files
+        spectra_urls = {}
+
+        for split in ["train", "val", "test"]:
+            df = pd.read_csv(extracted_path[split])
+            for _, row in df.iterrows():
+                spectra_url = f"{_URL}/spectra/{split}/{row['target']}/{row['spec_filename']}"
+                spectra_urls[row["spec_filename"]] = spectra_url
+
+        spectra = dl_manager.download_and_extract(spectra_urls)
+
+        # Load photometry and init reader
+        photometry_path = dl_manager.download(f"{_URL}/photometry.zip")
+        self.reader_v = ZipFile(photometry_path)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"csv_path": extracted_path["train"], "info_path": extracted_path["info"],
+                            "spectra": spectra, "split": "train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"csv_path": extracted_path["val"], "info_path": extracted_path["info"],
+                            "spectra": spectra, "split": "val"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"csv_path": extracted_path["test"], "info_path": extracted_path["info"],
+                            "spectra": spectra, "split": "test"},
+            ),
+        ]
+
+    def _generate_examples(self, csv_path, info_path, spectra, split):
        """Yields examples from a CSV file containing photometry, spectra, metadata, and labels."""
-        if not filepath.exists():
-            raise FileNotFoundError(f"Missing dataset file: {filepath}")
 
-        df = pd.read_csv(filepath)
+        if not os.path.exists(csv_path):
+            raise FileNotFoundError(f"Missing dataset file: {csv_path}")
+
+        if not os.path.exists(info_path):
+            raise FileNotFoundError(f"Missing info file: {info_path}")
+
+        df = pd.read_csv(csv_path)
+
+        with open(info_path) as f:
+            info = json.loads(f.read())
 
        for idx, row in df.iterrows():
-            photometry = self._get_photometry(row['name'])
-            spectra = np.zeros((200, 3))  # (None, 3)
-            metadata = np.zeros(25)  # (25,)
+            photometry = self._get_photometry(row["name"])
+            # Use a distinct local name so the `spectra` dict of downloaded
+            # files is not shadowed after the first iteration
+            spectra_data = self._get_spectra(spectra[row["spec_filename"]])
 
            yield idx, {
-                "id": str(row["id"]),
-                "photometry": photometry.tolist(),  # Convert back to list for HF compatibility
-                "spectra": spectra.tolist(),
-                "metadata": metadata.tolist(),
+                "photometry": photometry,
+                "spectra": spectra_data,
+                "metadata": row[info["all_cols"]],
                "label": row["target"],
            }
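
After this change the splits, spectra, and photometry archive resolve against the Hub repo itself rather than a local checkout. A minimal usage sketch, assuming the loader script above is published as MeriDK/AstroM3Dataset (the repo id implied by _URL) and that remote-code loading is enabled; config names follow the "{subset}_{seed}" pattern from BUILDER_CONFIGS:

# Minimal usage sketch, assuming the script is published as MeriDK/AstroM3Dataset.
from datasets import load_dataset

# Config name is "<subset>_<seed>"; "full_42" is DEFAULT_CONFIG_NAME above.
# trust_remote_code is required because the repo ships a loading script.
ds = load_dataset("MeriDK/AstroM3Dataset", "full_42", trust_remote_code=True)

sample = ds["train"][0]
print(sample["label"])          # class label, e.g. a variable-star type
print(len(sample["metadata"]))  # 25 metadata features per object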