# Get chunks of data, 10,000 rows at a time
import csv
import json
import sys
# Increase the CSV field size limit
csv.field_size_limit(sys.maxsize) # otherwise it gives "Error: field larger than field limit (131072)"
def load_data(file_path, chunk_size=10000):
    """Reads a CSV file in chunks of the specified size.

    Args:
        file_path: Path to the CSV file.
        chunk_size: Number of rows to read at a time.

    Yields:
        A list of rows for each chunk.
    """
    with open(file_path, 'r') as csvfile:
        reader = csv.reader(csvfile)
        # # Skip the header
        # next(reader)
        chunk = []
        first_row = True
        for row in reader:
            # paragraphs column: convert the JSON string back to a list
            if not first_row:
                row[2] = json.loads(row[2])
            else:
                first_row = False
            chunk.append(row)
            if len(chunk) >= chunk_size:
                yield chunk
                chunk = []
        if chunk:  # Handle the last chunk if not empty
            yield chunk
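
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): load_data() applies
# json.loads() to the third column, so the CSV is assumed to store that column
# as a JSON string (the comment in load_data suggests a list of paragraphs).
# A minimal writer that would produce such a file might look like this;
# `save_rows` and its arguments are hypothetical names used only here.
def save_rows(rows, file_path='crawled_data.csv'):
    """Write (parent_url, page_title, paragraphs) rows to a CSV file."""
    with open(file_path, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['parent_url', 'page_title', 'paragraph'])  # header row
        for parent_url, page_title, paragraphs in rows:
            # Serialize the paragraph list so load_data() can json.loads() it back.
            writer.writerow([parent_url, page_title,
                             json.dumps(paragraphs, ensure_ascii=False)])
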
if __name__ == '__main__':
    file_path = '/content/drive/MyDrive/Research/datasets/crawled_data/crawled_data.csv'
    chunk_size = 100  # 10000

    # Example usage:
    for chunk in load_data(file_path, chunk_size):
        # .............................................
        '''
        * Process each chunk of data here.
        * Each chunk is a list of lists.
        * The format of each inner list is: ['parent_url', 'page_title', 'paragraph']
          e.g.
          chunk = [
              ['https://www.bbc.com/nepali', 'मुख पृष्ठ - BBC News नेपाली', 'सुर्खेत र जुम्लामा बाहेक कर्णालीका अरू जिल्लामा शिशुका लागि आवश्यक एनआईसीयू सेवा नै उपलब्ध छैन।'],
              ['https://www.bbc.com/nepali', 'मुख पृष्ठ - BBC News नेपाली', 'नेपालले करिब एक महिना अघि नै औपचारिक पत्र पठाएर जीबी राईलाई स्वदेश फर्काइदिन गरेको आग्रहबारे मलेशियाले कुनै औपचारिक जबाफ दिएको छैन।'],
              ...
          ]
        * See the illustrative sketch appended at the end of this file for one
          way to flatten these chunks into individual paragraphs.
        '''
        # .............................................
        print(f'columns: {chunk[0]}')

        # First data row (chunk[0] is the header)
        url = chunk[1][0]
        title = chunk[1][1]
        paragraphs = chunk[1][2]
        print(f'row-1: url: {url}, title: {title},\n paragraphs: {paragraphs}')

        # do processing stuff
        break
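
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): one assumed way to
# flatten the chunks described in the __main__ block above into individual
# paragraphs, skipping the header row of the first chunk. `iter_paragraphs`
# is a hypothetical helper name; it assumes row[2] decodes to a list of
# paragraph strings, as the comment in load_data suggests.
def iter_paragraphs(file_path, chunk_size=10000):
    """Yield every paragraph from the CSV, one at a time."""
    header_skipped = False
    for chunk in load_data(file_path, chunk_size):
        for row in chunk:
            if not header_skipped:
                header_skipped = True  # the very first row is the CSV header
                continue
            # row[2] was already converted back to a list by load_data().
            for paragraph in row[2]:
                yield paragraph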