Languages: English
Commit 968a490 by wzkariampuzha · 1 Parent(s): f2c92c2

Delete EpiClassify4GARD.py

Files changed (1)
  1. EpiClassify4GARD.py +0 -149
EpiClassify4GARD.py DELETED
@@ -1,149 +0,0 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""INSERT TITLE"""

import logging
import datasets
import csv

_CITATION = """\
*REDO*

"""

_DESCRIPTION = """\
**REWRITE*

"""

_URL = "https://huggingface.co/datasets/ncats/GARD_EpiSet4TextClassification/raw/main/"
#"https://huggingface.co/datasets/wzkariampuzha/EpiClassifySet/raw/main/"
_TRAINING_FILE = "epi_classify_train.tsv"
_VAL_FILE = "epi_classify_val.tsv"
_TEST_FILE = "epi_classify_test.tsv"

class EpiSetConfig(datasets.BuilderConfig):
    """BuilderConfig for EpiSet."""

    def __init__(self, **kwargs):
        """BuilderConfig for EpiSet.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(EpiSetConfig, self).__init__(**kwargs)

class EpiSet(datasets.GeneratorBasedBuilder):
    """EpiSet4NER by GARD."""

    BUILDER_CONFIGS = [
        EpiSetConfig(name="EpiSet4NER", version=datasets.Version("1.0.0"), description="EpiSet4NER by NIH NCATS GARD"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    # Each example is one abstract string, matching what
                    # _generate_examples yields.
                    "abstracts": datasets.Value("string"),
                    #"abstracts": datasets.Sequence(datasets.Value("string")),
                    # Token-level NER labels kept for reference; left commented out
                    # because a bare triple-quoted string is not valid inside a dict literal:
                    # "labels": datasets.Sequence(
                    #     datasets.features.ClassLabel(
                    #         names=[
                    #             "O",       # (0)
                    #             "B-LOC",   # (1)
                    #             "I-LOC",   # (2)
                    #             "B-EPI",   # (3)
                    #             "I-EPI",   # (4)
                    #             "B-STAT",  # (5)
                    #             "I-STAT",  # (6)
                    #         ]
                    #     )
                    # ),
                    "labels": datasets.features.ClassLabel(
                        names=[
                            "1 = Epi Abstract",
                            "2 = Not Epi Abstract",
                        ]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/ncats/epi4GARD/tree/master/Epi4GARD#epi4gard",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "val": f"{_URL}{_VAL_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logging.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            data = csv.reader(f, delimiter="\t")
            next(data)  # skip the header row
            for id_, row in enumerate(data):
                # Keys must match the features declared in _info():
                # column 0 holds the abstract text, column 1 the integer class label.
                yield id_, {
                    "idx": str(id_),
                    "abstracts": row[0],
                    "labels": int(row[1]),
                }
        # Earlier CoNLL-style parser, kept for reference:
        '''
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            abstracts = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n" or line == "abstract\tlabel\n":
                    if abstracts:
                        yield guid, {
                            "idx": str(guid),
                            "abstracts": abstracts,
                            "labels": labels,
                        }
                        guid += 1
                        abstracts = []
                        labels = []
                else:
                    # EpiSet abstracts are space separated
                    splits = line.split("\t")
                    abstracts.append(splits[0])
                    labels.append(splits[1].rstrip())
            # last example
            if abstracts:
                yield guid, {
                    "idx": str(guid),
                    "abstracts": abstracts,
                    "labels": labels,
                }
        '''
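
For context, a loading script like the one deleted above is consumed through datasets.load_dataset. The sketch below is illustrative only: it assumes the script is still present in the repository (the repo id ncats/GARD_EpiSet4TextClassification is taken from _URL above, and the config name from BUILDER_CONFIGS), and that the installed datasets release still executes dataset scripts (newer releases require trust_remote_code=True, and the most recent ones drop script execution entirely).

import datasets

# Hypothetical invocation of the deleted loading script; the repo id and config
# name come from _URL and BUILDER_CONFIGS in the file above.
episet = datasets.load_dataset(
    "ncats/GARD_EpiSet4TextClassification",
    name="EpiSet4NER",
    trust_remote_code=True,  # required by newer datasets versions to run a script
)

print(episet)              # DatasetDict with train / validation / test splits
print(episet["train"][0])  # e.g. {'idx': '0', 'abstracts': '...', 'labels': 0}

After this commit the .py builder is gone, so load_dataset would instead fall back to whatever data files or dataset-card configuration remain in the repository.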