ujs committed on
Commit a36b7e5 · 1 Parent(s): 85375b2
Files changed (1)
  1. hinglish.py +18 -11
hinglish.py CHANGED
@@ -3,14 +3,24 @@ import json
 import os
 import datasets
 
-_ANNOT_URL = {
-    "train": "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/metadata.csv",
-    "test": "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/metadata-test.csv"
-}
+# _ANNOT_URL = {
+#     "train": "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/metadata.csv",
+#     "test": "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/metadata-test.csv",
+# }
+
+_ANNOT_URL = {
+    "train": "./data/metadata.csv",
+    "test": "./data/metadata-test.csv"
+}
 
-_DATA_URL = [
-    "https://huggingface.co/datasets/ujs/hinglish/blob/main/data/train.tar.gz",
-    "https://huggingface.co/datasets/ujs/hinglish/blob/main/data/test.tar.gz"
-]
+# _DATA_URL = [
+#     "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/train.tar.gz",
+#     "https://huggingface.co/datasets/ujs/hinglish/resolve/main/data/test.tar.gz"
+# ]
+
+_DATA_URL = [
+    "./data/train.tar.gz",
+    "./data/test.tar.gz"
+]
 
 _DESCRIPTION = """\
@@ -28,21 +38,19 @@ class HinglishDataset(datasets.GeneratorBasedBuilder):
                 "audio": datasets.Audio(sampling_rate=16_000),
                 "sentence": datasets.Value("string"),
             }),
-            supervised_keys=("audio", "sentence"),
+            supervised_keys=None,
         )
 
     def _split_generators(self, dl_manager):
         prompts_paths = dl_manager.download(_ANNOT_URL)
         archive = dl_manager.download(_DATA_URL)
-        train_dir = "hinglish/data/train"
-        test_dir = "hinglish/data/test"
-
+        data_dir = './data'
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "prompts_path": prompts_paths["train"],
-                    "path_to_clips": train_dir,
+                    "path_to_clips": data_dir,
                     "audio_files": dl_manager.iter_archive(archive),
                 },
             ),
@@ -50,7 +58,7 @@ class HinglishDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "prompts_path": prompts_paths["test"],
-                    "path_to_clips": test_dir,
+                    "path_to_clips": data_dir,
                     "audio_files": dl_manager.iter_archive(archive),
                 },
             ),
@@ -61,7 +69,6 @@ class HinglishDataset(datasets.GeneratorBasedBuilder):
         with open(prompts_path, encoding="utf-8") as f:
            for row in f:
                data = row.strip().split(",")
-               print(data)
                audio_path = "/".join([path_to_clips, data[0]])
                examples[audio_path] = {
                    "path": audio_path,