Upload README.md with huggingface_hub
README.md CHANGED
@@ -246,6 +246,11 @@ configs:
   - split: train
     path:
     - pre_tokenized/nepberta.parquet
+- config_name: iriisnepal_u_nepberta_512
+  data_files:
+  - split: train
+    path:
+    - "iriisnepal_u_nepberta_512.csv"
 ---
 # Nepali LLM Datasets

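The hunk above registers a new `iriisnepal_u_nepberta_512` config pointing at `iriisnepal_u_nepberta_512.csv`. It should be loadable by name in the same way as the existing `nepberta` config; a minimal sketch (the CSV's column layout is not shown in this diff, so the code only inspects the keys of the first record):

```python
from datasets import load_dataset

# Stream the newly added config so the full CSV is not downloaded up front.
ds = load_dataset(
    "Aananda-giri/nepali_llm_datasets",
    name="iriisnepal_u_nepberta_512",
    streaming=True,
)["train"]

# Column names are not shown in the diff; inspect the first record to find them.
first = next(iter(ds))
print(first.keys())
```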
@@ -284,11 +289,14 @@ len(nepberta_train['text'][0]) # length of large text equivalent to 500 MB text
 nepberta_train = load_dataset("Aananda-giri/nepali_llm_datasets", name="nepberta", streaming=True)['train']

 # using next
-next(iter(nepberta_train))
+next_text_chunk = next(iter(nepberta_train))
+print(len(next_text_chunk['text']))

 # using for loop
 for large_chunk in nepberta_train:
-
+    print(len(large_chunk['text']))
+    break
+
     # code to process large_chunk['text']

 # Load scrapy engine data
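The second hunk updates the streaming example to bind the result of `next()` and to print chunk lengths inside the loop, breaking after the first record since each `nepberta` record is one very large text blob. To process a few chunks without a manual `break`, bounding the stream with `itertools.islice` is one option (a sketch, not from the README; the processing step is a placeholder):

```python
from itertools import islice

from datasets import load_dataset

nepberta_train = load_dataset(
    "Aananda-giri/nepali_llm_datasets", name="nepberta", streaming=True
)["train"]

# Cap the iteration at three records instead of break-ing out of the loop.
for large_chunk in islice(nepberta_train, 3):
    print(len(large_chunk["text"]))  # chunk length in characters
    # ... process large_chunk['text'] here
```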