Datasets:
Upload knessetCorpus.py
Browse files — knessetCorpus.py (+27 −51)
knessetCorpus.py
CHANGED
@@ -357,8 +357,7 @@ class KnessetCorpus(datasets.GeneratorBasedBuilder):
|
|
357 |
|
358 |
def _generate_examples(self, data_files):
|
359 |
"""
|
360 |
-
Generate examples
|
361 |
-
Handles `.bz2` and `.zip` files based on mode.
|
362 |
"""
|
363 |
if "all_features_sentences" in self.config.name:
|
364 |
id_field_name = "sentence_id"
|
@@ -373,54 +372,31 @@ class KnessetCorpus(datasets.GeneratorBasedBuilder):
|
|
373 |
|
374 |
for filepath in data_files:
|
375 |
try:
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
393 |
except Exception as e:
|
394 |
print(f"Error processing file '{filepath}': {e}. Skipping.")
|
395 |
-
|
396 |
-
def _process_line(self, line, id_field_name, line_number, filepath):
    """
    Parse a single JSON line and yield (key, example) pairs.

    Args:
        line: one raw line of text (a JSON object) read from a data file.
        id_field_name: name of the field whose value keys the example.
        line_number: 0-based position of the line in its file, used both
            for diagnostics and for synthesizing a fallback key.
        filepath: origin of the line, used only in diagnostic messages.

    Yields:
        (key, example) tuples. Rows carrying a `protocol_sentences` list
        yield one example per nested sentence keyed by that sentence's
        `sentence_id`; all other rows yield a single example projected onto
        the declared feature schema.
    """
    try:
        row = json.loads(line)
    except json.JSONDecodeError as e:
        # Best-effort loader: a corrupt line is reported and dropped rather
        # than aborting the whole file.
        print(f"Failed to decode JSON at line {line_number} in file {filepath}: {e}. Skipping.")
        return

    # Nested structure: emit each sentence on its own. Sentences lacking a
    # sentence_id are silently dropped (matches prior behavior).
    if "protocol_sentences" in row:
        for sentence in row.get("protocol_sentences", []):
            sentence_id = sentence.get("sentence_id")
            if sentence_id:
                yield sentence_id, sentence
        return

    # Resolve the example key. A missing id field falls back to a synthetic
    # key so the row is still emitted; a present-but-empty id is treated as
    # corrupt data and skipped. (Previously a missing key silently got the
    # synthetic id while the "not found" warning could only ever fire for
    # present-but-falsy values, with a misleading message.)
    if id_field_name in row:
        id_ = row[id_field_name]
        if not id_:
            print(f"Empty value for key '{id_field_name}' in row at line {line_number}. Skipping row.")
            return
    else:
        id_ = f"unknown_{line_number}"

    # Project the row onto the declared feature schema; absent features
    # become None so every example has a uniform shape.
    sample = {feature: row.get(feature, None) for feature in self._info().features}
    yield id_, sample
|
|
|
357 |
|
358 |
def _generate_examples(self, data_files):
|
359 |
"""
|
360 |
+
Generate examples from files, supporting both streaming and non-streaming modes.
|
|
|
361 |
"""
|
362 |
if "all_features_sentences" in self.config.name:
|
363 |
id_field_name = "sentence_id"
|
|
|
372 |
|
373 |
for filepath in data_files:
|
374 |
try:
|
375 |
+
# Hugging Face automatically handles streaming
|
376 |
+
for line_number, row in enumerate(filepath):
|
377 |
+
try:
|
378 |
+
row = json.loads(row)
|
379 |
+
except json.JSONDecodeError as e:
|
380 |
+
print(f"Failed to decode JSON at line {line_number} in file {filepath}: {e}. Skipping.")
|
381 |
+
continue
|
382 |
+
|
383 |
+
# Extract the ID field
|
384 |
+
id_ = row.get(id_field_name, f"unknown_{line_number}")
|
385 |
+
if not id_:
|
386 |
+
print(f"Key '{id_field_name}' not found in row at line {line_number}. Skipping row.")
|
387 |
+
continue
|
388 |
+
|
389 |
+
# Prepare the sample dictionary
|
390 |
+
sample = {feature: row.get(feature, None) for feature in self._info().features}
|
391 |
+
|
392 |
+
# Handle nested structures (e.g., `protocol_sentences`)
|
393 |
+
if "protocol_sentences" in row:
|
394 |
+
protocol_sentences = row.get("protocol_sentences", [])
|
395 |
+
for sentence in protocol_sentences:
|
396 |
+
sentence_id = sentence.get("sentence_id", None)
|
397 |
+
if sentence_id:
|
398 |
+
yield sentence_id, sentence
|
399 |
+
else:
|
400 |
+
yield id_, sample
|
401 |
except Exception as e:
|
402 |
print(f"Error processing file '{filepath}': {e}. Skipping.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|