Upload README.md with huggingface_hub
README.md CHANGED
@@ -241,6 +241,11 @@ configs:
   - split: train
     path:
     - "scrapy_engine/cleaned_data.csv"
+- config_name: pre_tokenized
+  data_files:
+  - split: train
+    path:
+    - "pre_tokenized/nepberta.parquet"
 ---
 # Nepali LLM Datasets
 
@@ -269,7 +274,7 @@ To load the datasets:
 from datasets import load_dataset
 
 # Load nepberta configuration
-
+nepberta_dataset = load_dataset("Aananda-giri/nepali_llm_datasets", name="nepberta", split='train') # use `streaming=True` to avoid downloading all the dataset
 
 # length of chunks
 len(nepberta_train['text']) # 18 : number of chunks