import csv
import json
import sys

# Raise the CSV field-size limit so very long text fields do not trigger
# csv's "field larger than field limit" error.
csv.field_size_limit(sys.maxsize)

def load_data(file_path, chunk_size=10000):
    """Read a CSV file and yield its rows in chunks of the specified size.

    Args:
        file_path: Path to the CSV file.
        chunk_size: Number of rows to read at a time.

    Yields:
        A list of rows for each chunk.
    """
    # newline='' is the csv module's recommended mode; utf-8 covers the
    # Nepali text in the dataset.
    with open(file_path, 'r', newline='', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)

        chunk = []
        first_row = True
        for row in reader:
            # The first row is the header; for every data row, decode the
            # JSON-encoded 'paragraph' column.
            if not first_row:
                row[2] = json.loads(row[2])
            else:
                first_row = False
            chunk.append(row)
            if len(chunk) >= chunk_size:
                yield chunk
                chunk = []

        # Yield any leftover rows that did not fill a complete chunk.
        if chunk:
            yield chunk
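

# A minimal consumption sketch, not part of the original script: the CSV
# header travels inside the first chunk only, so drop it once up front.
# `iter_rows` is a hypothetical helper name.
def iter_rows(file_path, chunk_size=10000):
    for i, chunk in enumerate(load_data(file_path, chunk_size)):
        rows = chunk[1:] if i == 0 else chunk  # skip the header row once
        for row in rows:
            # row is [parent_url, page_title, decoded 'paragraph' field]
            yield row
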

if __name__ == '__main__':
    file_path = '/content/drive/MyDrive/Research/datasets/crawled_data/crawled_data.csv'
    chunk_size = 100

    for chunk in load_data(file_path, chunk_size):
        '''
        Process each chunk of data here (a small sketch follows below):
        * Each chunk is a list of lists.
        * Each inner list has the format: ['parent_url', 'page_title', 'paragraph']

        e.g.
        chunk = [
            ['https://www.bbc.com/nepali', 'मुख पृष्ठ - BBC News नेपाली', 'सुर्खेत र जुम्लामा बाहेक कर्णालीका अरू जिल्लामा शिशुका लागि आवश्यक एनआईसीयू सेवा नै उपलब्ध छैन।'],
            ['https://www.bbc.com/nepali', 'मुख पृष्ठ - BBC News नेपाली', 'नेपालले करिब एक महिना अघि नै औपचारिक पत्र पठाएर जीबी राईलाई स्वदेश फर्काइदिन गरेको आग्रहबारे मलेशियाले कुनै औपचारिक जबाफ दिएको छैन।'],
            ...
        ]
        '''
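
        # A hedged sketch, not in the original script: count the data rows
        # in this chunk, skipping the header row that rides along in the
        # first chunk. `data_rows` is an illustrative name only.
        data_rows = chunk[1:]
        print(f'data rows in first chunk: {len(data_rows)}')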

        # The first chunk's first row is the CSV header.
        print(f'columns : {chunk[0]}')

        # First data row: unpack its three columns.
        url = chunk[1][0]
        title = chunk[1][1]
        paragraphs = chunk[1][2]
        print(f'row-1: url:{url}, title:{title}, \n paragraphs: {paragraphs}')

        # Inspect only the first chunk.
        break