sobir-hf committed
Commit 2432e74
Parent: edd2ab8

Update tajik-text-segmentation.py

Files changed (1)
  1. tajik-text-segmentation.py +10 -4
tajik-text-segmentation.py CHANGED
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 from .annotations_parser import load_yedda_annotations
 
 import datasets
+from huggingface_hub import snapshot_download
 
 
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -37,7 +39,6 @@ _HOMEPAGE = ""
 _LICENSE = ""
 
 
-
 class TajikTextSegmentation(datasets.GeneratorBasedBuilder):
     """A dataset of sentence-wise text segmentation in Tajik language."""
 
@@ -70,11 +71,8 @@ class TajikTextSegmentation(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
     def _split_generators(self, dl_manager):
-        # path = dl_manager.download_custom('https://huggingface.co/datasets/sobir-hf/tajik-text-segmentation/tree/main/annotations')
-
         path = snapshot_download(repo_id="sobir-hf/tajik-text-segmentation", repo_type='dataset')
         path = os.path.join(path, 'annotations')
-        # path = dl_manager.download([f'annotations/{i:04}.ann' for i in range(110)])
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -108,3 +106,11 @@ class TajikTextSegmentation(datasets.GeneratorBasedBuilder):
             }
 
 
+if __name__ == '__main__':
+    # You can test the data generation by running this script.
+    # It will create a dataset in a subdirectory `./datasets/`.
+    from datasets import load_dataset
+    # dataset = load_dataset('./tajik-text-segmentation.py')
+    dataset = load_dataset('tajik-text-segmentation')
+    print(dataset)
+    print(dataset['train'][0])
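
For context, a minimal sketch (not part of this commit) of what the new download path does: snapshot_download pulls the whole dataset repo into the local Hugging Face cache (or reuses an existing copy) and returns that local directory, and the builder then reads annotation files from its annotations/ subfolder. The sketch only uses snapshot_download and the standard library; the file-name pattern mentioned in the comment is taken from the deleted dl_manager.download(...) line and is illustrative.

import os

from huggingface_hub import snapshot_download

# Resolve (and cache) a local copy of the dataset repository.
local_path = snapshot_download(repo_id="sobir-hf/tajik-text-segmentation", repo_type="dataset")

# The builder points its split generator at this subdirectory.
annotations_dir = os.path.join(local_path, "annotations")

# Inspect a few annotation files (expected names like 0000.ann, per the removed comment).
print(annotations_dir)
print(sorted(os.listdir(annotations_dir))[:5])

Compared with the commented-out dl_manager.download(...) calls, a single snapshot_download fetches every file in one pass and reuses the cached snapshot on subsequent loads.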