Datasets:
Upload covertype.py
Browse files- covertype.py +18 -38
covertype.py
CHANGED
|
@@ -140,11 +140,8 @@ features_per_config = {k: datasets.Features(features_types_per_config[k]) for k
|
|
| 140 |
|
| 141 |
|
| 142 |
class CovertypeConfig(datasets.BuilderConfig):
|
| 143 |
-
def __init__(self,
|
| 144 |
-
|
| 145 |
-
super(CovertypeConfig,
|
| 146 |
-
self).__init__(version=VERSION,
|
| 147 |
-
**kwargs)
|
| 148 |
self.features = features_per_config[kwargs["name"]]
|
| 149 |
|
| 150 |
|
|
@@ -152,7 +149,7 @@ class Covertype(datasets.GeneratorBasedBuilder):
|
|
| 152 |
# dataset versions
|
| 153 |
DEFAULT_CONFIG = "covertype"
|
| 154 |
BUILDER_CONFIGS = [
|
| 155 |
-
CovertypeConfig(name="covertype",
|
| 156 |
description="Covertype for multiclass classification.")
|
| 157 |
]
|
| 158 |
|
|
@@ -161,49 +158,32 @@ class Covertype(datasets.GeneratorBasedBuilder):
|
|
| 161 |
if self.config.name not in features_per_config:
|
| 162 |
raise ValueError(f"Unknown configuration: {self.config.name}")
|
| 163 |
|
| 164 |
-
info = datasets.DatasetInfo(description=DESCRIPTION,
|
| 165 |
-
citation=_CITATION,
|
| 166 |
-
homepage=_HOMEPAGE,
|
| 167 |
features=features_per_config[self.config.name])
|
| 168 |
|
| 169 |
return info
|
| 170 |
|
| 171 |
-
def _split_generators(self,
|
| 172 |
-
dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
| 173 |
downloads = dl_manager.download_and_extract(urls_per_split)
|
| 174 |
|
| 175 |
return [
|
| 176 |
-
datasets.SplitGenerator(name=datasets.Split.TRAIN,
|
| 177 |
-
gen_kwargs={"filepath": downloads["train"]})
|
| 178 |
]
|
| 179 |
|
| 180 |
-
def _generate_examples(self,
|
| 181 |
-
filepath: str):
|
| 182 |
# try:
|
| 183 |
# with gzip.open(filepath) as log:
|
| 184 |
-
# data = pandas.read_csv(log,
|
| 185 |
-
header=None)
|
| 186 |
# except gzip.BadGzipFile:
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
for row_id, row in data.iterrows():
|
| 198 |
-
data_row = dict(row)
|
| 199 |
-
|
| 200 |
-
yield row_id,
|
| 201 |
-
data_row
|
| 202 |
-
|
| 203 |
-
def preprocess(self,
|
| 204 |
-
data: pandas.DataFrame,
|
| 205 |
-
config: str = DEFAULT_CONFIG) -> pandas.DataFrame:
|
| 206 |
-
data.loc[:,
|
| 207 |
-
"cover_type"] = data["cover_type"].apply(lambda x: x - 1)
|
| 208 |
|
| 209 |
return data
|
|
|
|
| 140 |
|
| 141 |
|
| 142 |
class CovertypeConfig(datasets.BuilderConfig):
|
| 143 |
+
def __init__(self, **kwargs):
|
| 144 |
+
super(CovertypeConfig, self).__init__(version=VERSION, **kwargs)
|
|
|
|
|
|
|
|
|
|
| 145 |
self.features = features_per_config[kwargs["name"]]
|
| 146 |
|
| 147 |
|
|
|
|
| 149 |
# dataset versions
|
| 150 |
DEFAULT_CONFIG = "covertype"
|
| 151 |
BUILDER_CONFIGS = [
|
| 152 |
+
CovertypeConfig(name="covertype",
|
| 153 |
description="Covertype for multiclass classification.")
|
| 154 |
]
|
| 155 |
|
|
|
|
| 158 |
if self.config.name not in features_per_config:
|
| 159 |
raise ValueError(f"Unknown configuration: {self.config.name}")
|
| 160 |
|
| 161 |
+
info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
|
|
|
|
|
|
|
| 162 |
features=features_per_config[self.config.name])
|
| 163 |
|
| 164 |
return info
|
| 165 |
|
| 166 |
+
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
|
|
|
| 167 |
downloads = dl_manager.download_and_extract(urls_per_split)
|
| 168 |
|
| 169 |
return [
|
| 170 |
+
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
|
|
|
|
| 171 |
]
|
| 172 |
|
| 173 |
+
def _generate_examples(self, filepath: str):
|
|
|
|
| 174 |
# try:
|
| 175 |
# with gzip.open(filepath) as log:
|
| 176 |
+
# data = pandas.read_csv(log, header=None)
|
|
|
|
| 177 |
# except gzip.BadGzipFile:
|
| 178 |
+
data = pandas.read_csv(filepath, header=None)
|
| 179 |
+
data.columns = _BASE_FEATURE_NAMES
|
| 180 |
+
data = self.preprocess(data, config=self.config.name)
|
| 181 |
+
|
| 182 |
+
for row_id, row in data.iterrows():
|
| 183 |
+
data_row = dict(row)
|
| 184 |
+
yield row_id, data_row
|
| 185 |
+
|
| 186 |
+
def preprocess(self, data: pandas.DataFrame, config: str = DEFAULT_CONFIG) -> pandas.DataFrame:
|
| 187 |
+
data.loc[:, "cover_type"] = data["cover_type"].apply(lambda x: x - 1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
|
| 189 |
return data
|