Datasets:

ArXiv:
License:
polinaeterna committed on
Commit
2201c7f
·
1 Parent(s): 00846d9

update script to incorporate data from records

Browse files
Files changed (1) hide show
  1. evi.py +56 -14
evi.py CHANGED
@@ -20,9 +20,10 @@ for spoken dialogue systems.
20
 
21
 
22
  import csv
 
23
  import json
24
  import os
25
- from pathlib import Path
26
 
27
  import datasets
28
 
@@ -58,10 +59,14 @@ _HOMEPAGE = "https://github.com/PolyAI-LDN/evi-paper"
58
 
59
  _BASE_URL = "https://huggingface.co/datasets/PolyAI/evi/resolve/main/data"
60
 
61
- _TEXT_URL = {
62
  lang: os.path.join(_BASE_URL, f"dialogues.{lang.split('-')[0]}.tsv") for lang in _LANGS
63
  }
64
 
 
 
 
 
65
  _AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip" # noqa
66
 
67
  _VERSION = datasets.Version("0.0.1", "")
@@ -86,14 +91,21 @@ class Evi(datasets.GeneratorBasedBuilder):
86
  features = datasets.Features(
87
  {
88
  "language": datasets.ClassLabel(names=_LANGS),
 
 
89
  "dialogue_id": datasets.Value("string"),
90
  "speaker_id": datasets.Value("string"),
91
  "turn_id": datasets.Value("int32"),
92
  "target_profile_id": datasets.Value("string"),
93
- "asr_transcription": datasets.Value("string"),
94
  "asr_nbest": datasets.Sequence(datasets.Value("string")),
95
  "path": datasets.Value("string"),
96
- "audio": datasets.Audio(sampling_rate=8_000),
 
 
 
 
 
 
97
  }
98
  )
99
 
@@ -108,10 +120,14 @@ class Evi(datasets.GeneratorBasedBuilder):
108
 
109
  def _split_generators(self, dl_manager):
110
  langs = self.config.languages
 
 
 
111
  lang2text_urls = {
112
- lang: _TEXT_URL[lang] for lang in langs
113
  }
114
- lang2text_paths = dl_manager.download_and_extract(lang2text_urls)
 
115
  audio_data_path = dl_manager.download_and_extract(_AUDIO_DATA_URL)
116
 
117
  return [
@@ -119,20 +135,32 @@ class Evi(datasets.GeneratorBasedBuilder):
119
  name=datasets.Split.TEST,
120
  gen_kwargs={
121
  "audio_data_path": audio_data_path,
122
- "text_paths": lang2text_paths,
 
123
  },
124
  )
125
  ]
126
 
127
- def _generate_examples(self, audio_data_path, text_paths):
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
- for lang in text_paths.keys():
130
- text_path = text_paths[lang]
131
  with open(text_path, encoding="utf-8") as fin:
132
- reader = csv.DictReader(
133
  fin, delimiter="\t", skipinitialspace=True
134
  )
135
- for dictrow in reader:
 
136
  dialogue_id = dictrow["dialogue_id"]
137
  turn_id = dictrow["turn_num"]
138
  file_path = os.path.join(
@@ -142,15 +170,29 @@ class Evi(datasets.GeneratorBasedBuilder):
142
  f'{turn_id}.wav'
143
  )
144
  full_path = os.path.join(audio_data_path, file_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
  yield file_path, {
147
  "language": lang,
 
148
  "dialogue_id": dialogue_id,
149
  "speaker_id": dictrow["speaker_id"],
150
  "turn_id": turn_id,
151
- "target_profile_id": dictrow["scenario_id"],
152
  "asr_transcription": dictrow["transcription"],
153
  "asr_nbest": json.loads(dictrow["nbest"]),
154
  "path": file_path,
155
- "audio": str(full_path)
156
  }
 
20
 
21
 
22
  import csv
23
+ from datetime import datetime
24
  import json
25
  import os
26
+ import warnings
27
 
28
  import datasets
29
 
 
59
 
60
  _BASE_URL = "https://huggingface.co/datasets/PolyAI/evi/resolve/main/data"
61
 
62
+ _TEXTS_URL = {
63
  lang: os.path.join(_BASE_URL, f"dialogues.{lang.split('-')[0]}.tsv") for lang in _LANGS
64
  }
65
 
66
+ _RECORDS_URL = {
67
+ lang: os.path.join(_BASE_URL, f"records.{lang.split('-')[0]}.csv") for lang in _LANGS
68
+ }
69
+
70
  _AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip" # noqa
71
 
72
  _VERSION = datasets.Version("0.0.1", "")
 
91
  features = datasets.Features(
92
  {
93
  "language": datasets.ClassLabel(names=_LANGS),
94
+ "audio": datasets.Audio(sampling_rate=8_000),
95
+ "asr_transcription": datasets.Value("string"),
96
  "dialogue_id": datasets.Value("string"),
97
  "speaker_id": datasets.Value("string"),
98
  "turn_id": datasets.Value("int32"),
99
  "target_profile_id": datasets.Value("string"),
 
100
  "asr_nbest": datasets.Sequence(datasets.Value("string")),
101
  "path": datasets.Value("string"),
102
+ "postcode": datasets.Value("string"),
103
+ "name": datasets.Value("string"),
104
+ "dob": datasets.Value("date64"),
105
+ "name_first": datasets.Value("string"),
106
+ "name_last": datasets.Value("string"),
107
+ "sex": datasets.ClassLabel(names=["F", "M"]), # TODO: are there other genders or Nones?
108
+ "email": datasets.Value("string"),
109
  }
110
  )
111
 
 
120
 
121
  def _split_generators(self, dl_manager):
122
  langs = self.config.languages
123
+ lang2records_urls = {
124
+ lang: _RECORDS_URL[lang] for lang in langs
125
+ }
126
  lang2text_urls = {
127
+ lang: _TEXTS_URL[lang] for lang in langs
128
  }
129
+ records_paths = dl_manager.download_and_extract(lang2records_urls)
130
+ text_paths = dl_manager.download_and_extract(lang2text_urls)
131
  audio_data_path = dl_manager.download_and_extract(_AUDIO_DATA_URL)
132
 
133
  return [
 
135
  name=datasets.Split.TEST,
136
  gen_kwargs={
137
  "audio_data_path": audio_data_path,
138
+ "text_paths": text_paths,
139
+ "records_paths": records_paths,
140
  },
141
  )
142
  ]
143
 
144
+ def _generate_examples(self, audio_data_path, text_paths, records_paths):
145
+ for lang, text_path in text_paths.items():
146
+
147
+ records_path = records_paths[lang]
148
+ records = dict()
149
+ with open(records_path, encoding="utf-8") as fin:
150
+ records_reader = csv.DictReader(
151
+ fin, delimiter=",", skipinitialspace=True
152
+ )
153
+ for row in records_reader:
154
+ records[row["scenario_id"]] = row
155
+ records[row["scenario_id"]]["dob"] = datetime.strptime(row["dob"], "%Y-%m-%d")
156
+ _ = records[row["scenario_id"]].pop("scenario_id")
157
 
 
 
158
  with open(text_path, encoding="utf-8") as fin:
159
+ texts_reader = csv.DictReader(
160
  fin, delimiter="\t", skipinitialspace=True
161
  )
162
+ i = 0
163
+ for dictrow in texts_reader:
164
  dialogue_id = dictrow["dialogue_id"]
165
  turn_id = dictrow["turn_num"]
166
  file_path = os.path.join(
 
170
  f'{turn_id}.wav'
171
  )
172
  full_path = os.path.join(audio_data_path, file_path)
173
+ if not full_path.startswith("zip") and not os.path.isfile(full_path):
174
+ warnings.warn(f"{full_path} not found.")
175
+ continue
176
+
177
+ target_profile_id = dictrow["scenario_id"]
178
+ if target_profile_id not in records:
179
+ warnings.warn(
180
+ f"""
181
+ Record with scenario_id {target_profile_id} not found, ignoring this dialogue.
182
+ Full dialogue info: {dictrow}
183
+ """
184
+ )
185
+ continue
186
 
187
  yield file_path, {
188
  "language": lang,
189
+ "audio": str(full_path),
190
  "dialogue_id": dialogue_id,
191
  "speaker_id": dictrow["speaker_id"],
192
  "turn_id": turn_id,
193
+ "target_profile_id": target_profile_id,
194
  "asr_transcription": dictrow["transcription"],
195
  "asr_nbest": json.loads(dictrow["nbest"]),
196
  "path": file_path,
197
+ **records[target_profile_id]
198
  }