---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
- config_name: unfiltered
data_files:
- split: train
path: unfiltered/train-*
dataset_info:
- config_name: default
features:
- name: text
dtype: string
- name: url
dtype: string
- name: dump
dtype: string
- name: source
dtype: string
- name: word_count
dtype: int64
- name: flesch_reading_ease
dtype: float64
splits:
- name: train
num_bytes: 2003343392.8658142
num_examples: 223162
- name: validation
num_bytes: 52722397.8378977
num_examples: 5873
- name: test
num_bytes: 52722397.8378977
num_examples: 5873
download_size: 1137457027
dataset_size: 2108788188.5416098
- config_name: unfiltered
features:
- name: text
dtype: string
- name: url
dtype: string
- name: dump
dtype: string
- name: source
dtype: string
- name: word_count
dtype: int64
- name: flesch_reading_ease
dtype: float64
splits:
- name: train
num_bytes: 3452998372
num_examples: 384646
download_size: 1859375824
dataset_size: 3452998372
source_datasets: mponty/code_tutorials
license: odc-by
task_categories:
- text-generation
language:
- en
size_categories:
- 100K<n<1M
---

# Dataset Card for "code-tutorials-en"
This dataset contains English-language (`en`) code tutorials. The default
config keeps only documents that satisfy both filters:

- 100 words or more
- a Flesch reading ease score of 50 or more
```python
DatasetDict({
    train: Dataset({
        features: ['text', 'url', 'dump', 'source', 'word_count', 'flesch_reading_ease'],
        num_rows: 223162
    })
    validation: Dataset({
        features: ['text', 'url', 'dump', 'source', 'word_count', 'flesch_reading_ease'],
        num_rows: 5873
    })
    test: Dataset({
        features: ['text', 'url', 'dump', 'source', 'word_count', 'flesch_reading_ease'],
        num_rows: 5873
    })
})
```