Datasets:

GiliGold committed on
Commit
1c0eeed
·
verified ·
1 Parent(s): 6e954bc

Upload knessetCorpus.py

Browse files
Files changed (1) hide show
  1. knessetCorpus.py +50 -54
knessetCorpus.py CHANGED
@@ -356,6 +356,10 @@ class KnessetCorpus(datasets.GeneratorBasedBuilder):
356
  ]
357
 
358
  def _generate_examples(self, data_files):
 
 
 
 
359
  if "all_features_sentences" in self.config.name:
360
  id_field_name = "sentence_id"
361
  elif self.config.name == "protocols":
@@ -364,67 +368,59 @@ class KnessetCorpus(datasets.GeneratorBasedBuilder):
364
  id_field_name = "faction_id"
365
  elif self.config.name == "knessetMembers":
366
  id_field_name = "person_id"
 
 
367
 
368
  for filepath in data_files:
369
  try:
370
- # Streaming mode: Handle URLs directly
371
- if filepath.startswith("http"):
372
- with datasets.utils.stream_files(filepath) as f:
373
  for line_number, line in enumerate(f):
374
  yield from self._process_line(line, id_field_name, line_number, filepath)
 
 
 
 
 
 
 
375
  else:
376
- # Non-streaming mode: Handle local files
377
- if filepath.endswith(".bz2"):
378
- with bz2.open(filepath, "rt", encoding="utf-8") as f:
379
- for line_number, line in enumerate(f):
380
- yield from self._process_line(line, id_field_name, line_number, filepath)
381
- elif filepath.endswith(".zip"):
382
- with zipfile.ZipFile(filepath, "r") as archive:
383
- for filename in archive.namelist():
384
- with archive.open(filename) as f:
385
- for line_number, line in enumerate(io.TextIOWrapper(f, encoding="utf-8")):
386
- yield from self._process_line(line, id_field_name, line_number, filename)
387
- else:
388
- with open(filepath, "rt", encoding="utf-8") as f:
389
- for line_number, line in enumerate(f):
390
- yield from self._process_line(line, id_field_name, line_number, filepath)
391
-
392
  except Exception as e:
393
  print(f"Error processing file '{filepath}': {e}. Skipping.")
394
 
395
-
396
-
397
-
398
-
399
-
400
- def _process_line(self, line, id_field_name, line_number, filepath):
401
- """
402
- Process a single line of input, handling JSON decoding and nested structures.
403
- """
404
- try:
405
- row = json.loads(line)
406
- except json.JSONDecodeError as e:
407
- print(f"Failed to decode JSON at line {line_number} in file {filepath}: {e}. Skipping.")
408
- return
409
-
410
- # Extract the ID field
411
- id_ = row.get(id_field_name, f"unknown_{line_number}")
412
- if not id_:
413
- print(f"Key '{id_field_name}' not found in row at line {line_number}. Skipping row.")
414
- return
415
-
416
- # Prepare the sample dictionary
417
- sample = {feature: row.get(feature, None) for feature in self._info().features}
418
-
419
- # Handle nested structures (e.g., `protocol_sentences`)
420
- if "protocol_sentences" in row:
421
- protocol_sentences = row.get("protocol_sentences", [])
422
- for sentence in protocol_sentences:
423
- sentence_id = sentence.get("sentence_id", None)
424
- if sentence_id:
425
- yield sentence_id, sentence
426
- else:
427
  try:
428
- yield id_, sample
429
- except Exception as e:
430
- print(f"Failed to yield sample at line {line_number} in file {filepath}. Error: {e}. Sample: {sample}.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
356
  ]
357
 
358
  def _generate_examples(self, data_files):
359
+ """
360
+ Generate examples for both streaming and non-streaming modes.
361
+ Handles `.bz2` and `.zip` files based on mode.
362
+ """
363
  if "all_features_sentences" in self.config.name:
364
  id_field_name = "sentence_id"
365
  elif self.config.name == "protocols":
 
368
  id_field_name = "faction_id"
369
  elif self.config.name == "knessetMembers":
370
  id_field_name = "person_id"
371
+ else:
372
+ id_field_name = "id" # Default for generic configurations
373
 
374
  for filepath in data_files:
375
  try:
376
+ if filepath.endswith(".bz2"):
377
+ # Stream `.bz2` file
378
+ with bz2.open(filepath, "rt", encoding="utf-8") as f:
379
  for line_number, line in enumerate(f):
380
  yield from self._process_line(line, id_field_name, line_number, filepath)
381
+ elif filepath.endswith(".zip"):
382
+ # Stream `.zip` file
383
+ with zipfile.ZipFile(filepath, "r") as archive:
384
+ for filename in archive.namelist():
385
+ with archive.open(filename) as f:
386
+ for line_number, line in enumerate(io.TextIOWrapper(f, encoding="utf-8")):
387
+ yield from self._process_line(line, id_field_name, line_number, filename)
388
  else:
389
+ # Stream `.jsonl` or other uncompressed files
390
+ with open(filepath, "rt", encoding="utf-8") as f:
391
+ for line_number, line in enumerate(f):
392
+ yield from self._process_line(line, id_field_name, line_number, filepath)
 
 
 
 
 
 
 
 
 
 
 
 
393
  except Exception as e:
394
  print(f"Error processing file '{filepath}': {e}. Skipping.")
395
 
396
+ def _process_line(self, line, id_field_name, line_number, filepath):
397
+ """
398
+ Process a single line of input, handling JSON decoding and nested structures.
399
+ """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
400
  try:
401
+ row = json.loads(line)
402
+ except json.JSONDecodeError as e:
403
+ print(f"Failed to decode JSON at line {line_number} in file {filepath}: {e}. Skipping.")
404
+ return
405
+
406
+ # Extract the ID field
407
+ id_ = row.get(id_field_name, f"unknown_{line_number}")
408
+ if not id_:
409
+ print(f"Key '{id_field_name}' not found in row at line {line_number}. Skipping row.")
410
+ return
411
+
412
+ # Prepare the sample dictionary
413
+ sample = {feature: row.get(feature, None) for feature in self._info().features}
414
+
415
+ # Handle nested structures (e.g., `protocol_sentences`)
416
+ if "protocol_sentences" in row:
417
+ protocol_sentences = row.get("protocol_sentences", [])
418
+ for sentence in protocol_sentences:
419
+ sentence_id = sentence.get("sentence_id", None)
420
+ if sentence_id:
421
+ yield sentence_id, sentence
422
+ else:
423
+ try:
424
+ yield id_, sample
425
+ except Exception as e:
426
+ print(f"Failed to yield sample at line {line_number} in file {filepath}. Error: {e}. Sample: {sample}.")