File size: 3,097 Bytes
f57ae1a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 |
#!/usr/bin/python
import datasets
import pyarrow as pa
import pyarrow.parquet as pq
BASE_DATASET = "ejschwartz/oo-method-test"
class OOMethodTestDataset(datasets.ArrowBasedBuilder):
    """Builder that re-splits the base dataset (BASE_DATASET) three ways.

    Configurations:
      - ``combined``: every row in a single "combined" split.
      - ``byrow``: random 90/10 row-level train/test split (seed 42).
      - ``byfuncname``: 90/10 split over *unique* function names (seed 42),
        so all rows sharing a ``Name`` land in the same split.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="combined",
            version=datasets.Version("1.0.0"),
            description="All data files combined",
        ),
        datasets.BuilderConfig(
            name="byrow",
            version=datasets.Version("1.0.0"),
            description="Split by example (dumb)",
        ),
        datasets.BuilderConfig(
            name="byfuncname",
            version=datasets.Version("1.0.0"),
            description="Split by function name",
        ),
    ]

    def _info(self):
        # Features are left unspecified; they are inferred from the Arrow
        # tables yielded by _generate_tables.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        # The base data is itself a Hub dataset, so it is loaded directly
        # instead of going through dl_manager's download machinery.
        ds = datasets.load_dataset(BASE_DATASET)
        if self.config.name == "combined":
            return [
                datasets.SplitGenerator(
                    name="combined",
                    gen_kwargs={"ds": ds["combined"]},
                ),
            ]
        elif self.config.name == "byrow":
            # Plain row-level random split; rows from the same function may
            # end up in both train and test.
            split = ds["combined"].train_test_split(test_size=0.1, seed=42)
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={"ds": split["train"]},
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={"ds": split["test"]},
                ),
            ]
        elif self.config.name == "byfuncname":
            combined = ds["combined"]
            # Split the unique names, then route every row to the split that
            # owns its name, so no function straddles train and test.
            unique_names = combined.unique("Name")
            nameds = datasets.Dataset.from_dict({"Name": unique_names})
            name_split = nameds.train_test_split(test_size=0.1, seed=42)
            # Use sets for membership: filtering against the raw lists would
            # be O(rows x unique_names).
            train_names = set(name_split["train"]["Name"])
            test_names = set(name_split["test"]["Name"])
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={"ds": combined.filter(lambda r: r["Name"] in train_names)},
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={"ds": combined.filter(lambda r: r["Name"] in test_names)},
                ),
            ]
        else:
            # assert is stripped under `python -O`; raise explicitly so an
            # unknown config never falls through silently.
            raise ValueError(f"Unknown config name: {self.config.name!r}")

    def _generate_tables(self, ds):
        """Yield ``(key, pyarrow.Table)`` pairs for the given dataset slice.

        Converting through pandas is a workaround: the installed version of
        datasets does not appear to expose a direct batched Arrow iterator.
        """
        for i, batch in enumerate(ds.to_pandas(batched=True)):
            yield i, pa.Table.from_pandas(batch)
|