"""PostOperative Dataset"""
from typing import List
from functools import partial
import datasets
import pandas
VERSION = datasets.Version("1.0.0")
_ENCODING_DICS = {
"internal_temperature": {
"low": 0,
"mid": 1,
"high": 2
},
"surface_temperature": {
"low": 0,
"mid": 1,
"high": 2
},
"oxigen_saturation": {
"good": 0,
"excellent": 1
},
"blood_pressure": {
"low": 0,
"mid": 1,
"high": 2
},
"is_surface_temperature_stable": {
"stable": True,
"unstable": False,
},
"is_internal_temperature_stable": {
"stable": 2,
"mod-stable": 1,
"unstable": 0,
},
"is_blood_pressure_stable": {
"stable": 2,
"mod-stable": 1,
"unstable": 0,
}
}
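# Example: given the maps above, a raw value of "mid" for internal_temperature is encoded
# as 1, and "unstable" for is_surface_temperature_stable becomes False; columns without an
# entry here (e.g. perceived_comfort and decision) are not remapped by encode().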
DESCRIPTION = "PostOperative dataset."
_HOMEPAGE = "https://archive-beta.ics.uci.edu/dataset/78/page+blocks+classification"
_URLS = ("https://archive-beta.ics.uci.edu/dataset/78/page+blocks+classification")
_CITATION = """
@misc{misc_post-operative_patient_82,
    author = {Summers, Sharon and Woolery, Linda},
title = {{Post-Operative Patient}},
year = {1993},
howpublished = {UCI Machine Learning Repository},
note = {{DOI}: \\url{10.24432/C5DG6Q}}
}"""
# Per-split download URLs and per-configuration feature schemas
urls_per_split = {
"train": "https://huggingface.co/datasets/mstz/post_operative/raw/main/post_operative.data"
}
features_types_per_config = {
"post_operative": {
"internal_temperature": datasets.Value("int8"),
"surface_temperature": datasets.Value("int8"),
"oxigen_saturation": datasets.Value("int8"),
"blood_pressure": datasets.Value("int8"),
"is_surface_temperature_stable": datasets.Value("bool"),
"is_internal_temperature_stable": datasets.Value("int8"),
"is_blood_pressure_stable": datasets.Value("int8"),
"perceived_comfort": datasets.Value("int8"),
"decision": datasets.ClassLabel(num_classes=3, names=("discharge", "hospital floor", "intensive care")),
},
"post_operative_binary": {
"internal_temperature": datasets.Value("int8"),
"surface_temperature": datasets.Value("int8"),
"oxigen_saturation": datasets.Value("int8"),
"blood_pressure": datasets.Value("int8"),
"is_surface_temperature_stable": datasets.Value("bool"),
"is_internal_temperature_stable": datasets.Value("int8"),
"is_blood_pressure_stable": datasets.Value("int8"),
"perceived_comfort": datasets.Value("int8"),
"decision": datasets.ClassLabel(num_classes=2, names=("discharge", "don't discharge")),
}
}
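# the two configurations share the same input features and differ only in the label:
# a three-way decision (discharge / hospital floor / intensive care) versus a binary
# discharge / don't discharge decision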
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
class PostOperativeConfig(datasets.BuilderConfig):
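    """BuilderConfig that attaches the feature schema matching the selected configuration name."""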
def __init__(self, **kwargs):
super(PostOperativeConfig, self).__init__(version=VERSION, **kwargs)
self.features = features_per_config[kwargs["name"]]
class PostOperative(datasets.GeneratorBasedBuilder):
    # available dataset configurations
    DEFAULT_CONFIG_NAME = "post_operative"
BUILDER_CONFIGS = [
PostOperativeConfig(name="post_operative",
description="PostOperative for regression."),
PostOperativeConfig(name="post_operative_binary",
description="PostOperative for binary classification.")
]
def _info(self):
info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
features=features_per_config[self.config.name])
return info
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
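        # the source provides a single raw file, so only a TRAIN split is generated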
downloads = dl_manager.download_and_extract(urls_per_split)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]}),
]
def _generate_examples(self, filepath: str):
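        # read the raw CSV, apply the configuration-specific preprocessing,
        # and yield one (key, example) pair per row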
data = pandas.read_csv(filepath)
data = self.preprocess(data)
for row_id, row in data.iterrows():
data_row = dict(row)
yield row_id, data_row
    def preprocess(self, data: pandas.DataFrame) -> pandas.DataFrame:
        # drop rows whose comfort score is missing ("?") and cast the remaining values to integers
        data = data[data.perceived_comfort != "?"].copy()
        data["perceived_comfort"] = data["perceived_comfort"].astype("int8")
        if self.config.name == "post_operative_binary":
            # collapse "hospital floor" (1) and "intensive care" (2) into "don't discharge" (1)
            data["decision"] = data["decision"].apply(lambda x: 1 if x >= 1 else 0)
        # map the categorical string values onto the integer/boolean codes in _ENCODING_DICS
        for feature in _ENCODING_DICS:
            encoding_function = partial(self.encode, feature)
            data.loc[:, feature] = data[feature].apply(encoding_function)
        data = data.reset_index(drop=True)
        return data[list(features_types_per_config[self.config.name].keys())]
def encode(self, feature, value):
if feature in _ENCODING_DICS:
return _ENCODING_DICS[feature][value]
raise ValueError(f"Unknown feature: {feature}")
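# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the loading script),
# assuming this script is published under the repository id "mstz/post_operative"
# suggested by the raw-data URL in urls_per_split:
#
#     from datasets import load_dataset
#
#     # three-way decision: discharge / hospital floor / intensive care
#     ternary = load_dataset("mstz/post_operative", "post_operative", split="train")
#
#     # binary decision: discharge / don't discharge
#     binary = load_dataset("mstz/post_operative", "post_operative_binary", split="train")
# ---------------------------------------------------------------------------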