---
dataset_info:
- config_name: default
features:
- name: uid
dtype: int64
- name: id
dtype: int64
- name: comment_text
dtype: string
- name: toxicity
dtype:
class_label:
names:
'0': non-toxic
'1': toxic
- name: has_active_attrs
dtype: bool
- name: active_attrs
sequence: string
- name: male
dtype: int64
- name: female
dtype: int64
- name: LGBTQ
dtype: int64
- name: christian
dtype: int64
- name: muslim
dtype: int64
- name: other_religions
dtype: int64
- name: black
dtype: int64
- name: white
dtype: int64
- name: identity_any
dtype: int64
- name: severe_toxicity
dtype: int64
- name: obscene
dtype: int64
- name: threat
dtype: int64
- name: insult
dtype: int64
- name: identity_attack
dtype: int64
- name: sexual_explicit
dtype: int64
splits:
- name: train
num_bytes: 136807710
num_examples: 267516
- name: test
num_bytes: 67509707
num_examples: 132730
- name: validation
num_bytes: 23031837
num_examples: 45047
download_size: 114641124
dataset_size: 227349254
- config_name: raw
features:
- name: id
dtype: int64
- name: comment_text
dtype: string
- name: split
dtype: string
- name: created_date
dtype: string
- name: publication_id
dtype: int64
- name: parent_id
dtype: float64
- name: article_id
dtype: int64
- name: rating
dtype: string
- name: funny
dtype: int64
- name: wow
dtype: int64
- name: sad
dtype: int64
- name: likes
dtype: int64
- name: disagree
dtype: int64
- name: toxicity
dtype: float64
- name: severe_toxicity
dtype: float64
- name: obscene
dtype: float64
- name: sexual_explicit
dtype: float64
- name: identity_attack
dtype: float64
- name: insult
dtype: float64
- name: threat
dtype: float64
- name: male
dtype: float64
- name: female
dtype: float64
- name: transgender
dtype: float64
- name: other_gender
dtype: float64
- name: heterosexual
dtype: float64
- name: homosexual_gay_or_lesbian
dtype: float64
- name: bisexual
dtype: float64
- name: other_sexual_orientation
dtype: float64
- name: christian
dtype: float64
- name: jewish
dtype: float64
- name: muslim
dtype: float64
- name: hindu
dtype: float64
- name: buddhist
dtype: float64
- name: atheist
dtype: float64
- name: other_religion
dtype: float64
- name: black
dtype: float64
- name: white
dtype: float64
- name: asian
dtype: float64
- name: latino
dtype: float64
- name: other_race_or_ethnicity
dtype: float64
- name: physical_disability
dtype: float64
- name: intellectual_or_learning_disability
dtype: float64
- name: psychiatric_or_mental_illness
dtype: float64
- name: other_disability
dtype: float64
- name: identity_annotator_count
dtype: int64
- name: toxicity_annotator_count
dtype: int64
- name: LGBTQ
dtype: int64
- name: other_religions
dtype: int64
- name: asian_latino_etc
dtype: int64
- name: disability_any
dtype: int64
- name: identity_any
dtype: int64
- name: num_identities
dtype: float64
- name: more_than_one_identity
dtype: bool
- name: na_gender
dtype: int64
- name: na_orientation
dtype: int64
- name: na_religion
dtype: int64
- name: na_race
dtype: int64
- name: na_disability
dtype: int64
splits:
- name: train
num_bytes: 373627048
num_examples: 448000
download_size: 137912393
dataset_size: 373627048
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
- config_name: raw
data_files:
- split: train
path: raw/train-*
---

This is the CivilComments dataset available in the wilds library and downloadable from CodaLab. Its README.md file reads:
# Release notes for CivilComments
## v1.0
Dec 9, 2020: First release.
There are two configurations (a loading example follows the list):

- `default`: the processed data (more details below)
- `raw`: the raw dataset as downloaded from the link above
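
Below is a minimal loading sketch for the two configurations using the standard `datasets` API. The repository ID is a placeholder, not the card's actual path, and must be replaced before running.

```python
from datasets import load_dataset

# Placeholder repository ID -- substitute the actual path of this dataset card.
REPO_ID = "<namespace>/civilcomments"

# "default": processed data with train/test/validation splits (data/*).
default_ds = load_dataset(REPO_ID, name="default")
print(default_ds["train"].features["toxicity"])  # ClassLabel(names=['non-toxic', 'toxic'])

# "raw": the unprocessed dump, exposed as a single train split (raw/*).
raw_ds = load_dataset(REPO_ID, name="raw", split="train")
print(raw_ds.num_rows, raw_ds.column_names[:5])
```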
The `default` configuration was created using the processing function in `processing_script.py`.
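
The actual transformation is defined in `processing_script.py` and is not reproduced here. The sketch below only illustrates the kind of binarization the schema difference suggests (the `raw` configuration stores float annotation scores, the `default` configuration stores integer labels); the 0.5 threshold, the function name, and the derived-column logic are assumptions, not the script's confirmed behavior.

```python
# Illustrative only: an assumed binarization step consistent with the schema
# difference between the `raw` (float64 scores) and `default` (int64 labels)
# configurations. The authoritative logic lives in processing_script.py.
IDENTITY_COLS = [
    "male", "female", "LGBTQ", "christian",
    "muslim", "other_religions", "black", "white",
]
AUX_TOXICITY_COLS = [
    "severe_toxicity", "obscene", "threat",
    "insult", "identity_attack", "sexual_explicit",
]

def binarize_row(row, threshold=0.5):
    """Turn a raw row's float scores into integer labels like the default config's."""
    def as_label(value):
        # Missing scores (None or NaN, where value != value) fall back to 0.
        return int(value is not None and value == value and value >= threshold)

    out = {"toxicity": as_label(row["toxicity"])}
    for col in IDENTITY_COLS + AUX_TOXICITY_COLS:
        out[col] = as_label(row[col])
    active = [c for c in IDENTITY_COLS if out[c] == 1]
    out["active_attrs"] = active
    out["has_active_attrs"] = bool(active)
    out["identity_any"] = int(bool(active))
    return out

# Example usage on the raw configuration loaded above:
# processed = raw_ds.map(binarize_row)
```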