# Standard library.
# NOTE: `import urllib` alone does not guarantee that the `urllib.request`
# submodule (used below to download the verified-users list) is loaded as an
# attribute of the package — import it explicitly.
import urllib.request

# Third-party.
import pandas as pd
from datasets import load_dataset
from sklearn.model_selection import train_test_split
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | HATE = 1 | 
					
						
						|  | NOT_HATE = 0 | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | class_mapping = { | 
					
						
						|  | 'target_gender_aggregated': 0, | 
					
						
						|  | 'target_race_aggregated': 1, | 
					
						
						|  | 'target_sexuality_aggregated': 2, | 
					
						
						|  | 'target_religion_aggregated': 3, | 
					
						
						|  | 'target_origin_aggregated': 4, | 
					
						
						|  | 'target_disability_aggregated': 5, | 
					
						
						|  | 'target_age_aggregated': 6, | 
					
						
						|  | 'not_hate': 7 | 
					
						
						|  | } | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | def map_label(x): | 
					
						
						|  | if x >= -1 and x <= 0.5: | 
					
						
						|  | label = 999 | 
					
						
						|  | elif x > 0.5: | 
					
						
						|  | label = HATE | 
					
						
						|  | elif x < -1: | 
					
						
						|  | label = NOT_HATE | 
					
						
						|  | return label | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | def clean_text(text): | 
					
						
						|  | text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ') | 
					
						
						|  |  | 
					
						
						|  | new_text = [] | 
					
						
						|  | for t in text.split(): | 
					
						
						|  |  | 
					
						
						|  | t = '@user' if t.startswith('@') and len(t) > 1 and t.replace('@','').lower() not in verified_users else t | 
					
						
						|  | t = '{URL}' if (t.startswith('http') or t.startswith('URL')) else t | 
					
						
						|  | new_text.append(t) | 
					
						
						|  |  | 
					
						
						|  | return ' '.join(new_text) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
# ---- Load the raw per-annotator rows -----------------------------------
dataset = load_dataset('ucberkeley-dlab/measuring-hate-speech')
df = dataset['train'].to_pandas()

# Per-annotator binary label derived from the continuous score
# (999 marks the ambiguous middle band and is filtered out below).
df['annon_label'] = df['hate_speech_score'].apply(map_label)

# NOTE(review): platform == 2 presumably selects Twitter posts — confirm
# against the dataset card.
df = df[df['platform'] == 2]

# Keep only confidently-labelled annotator rows.
df = df[df['annon_label'].isin([HATE, NOT_HATE])]

# ---- Aggregate annotator labels per comment ----------------------------
# Keep a (comment_id, annon_label) pair only when at least 2 annotators
# assigned that label to the comment.
df_count_label = pd.DataFrame(df.groupby('comment_id')['annon_label'].value_counts())
df_count_label = df_count_label.rename(columns={'annon_label': 'count'})
df_count_label = df_count_label.reset_index(level=1)
df_count_label = df_count_label[df_count_label['count'] >= 2]

# Broadcast the agreed label onto every row of its comment (index-aligned
# assignment); comments with no >=2 agreement stay None and are dropped.
df = df.set_index('comment_id')
df['label'] = None
df['label'] = df_count_label['annon_label']

df = df[df['label'].notnull()]
df = df.reset_index()

# ---- Aggregate target-group flags per comment --------------------------
targets = ['target_race', 'target_religion', 'target_origin', 'target_gender',
'target_sexuality', 'target_age', 'target_disability']

for t in targets:
    # Count annotator votes for each boolean value of this target flag.
    df_count_targets = pd.DataFrame(df.groupby('comment_id')[t].value_counts())
    df_count_targets = df_count_targets.rename(columns={t: 'count'})
    df_count_targets = df_count_targets.reset_index(level=1)
    df_count_targets = df_count_targets[df_count_targets['count'] >= 2]

    # Drop comments where BOTH True and False reached the >=2 threshold
    # (duplicated index): keep only unambiguous agreements.
    df_count_targets = df_count_targets.loc[df_count_targets.index.drop_duplicates(keep=False)]

    # Broadcast the agreed flag onto all rows of the comment, defaulting to
    # False where there was no (unambiguous) agreement.
    df = df.set_index('comment_id')
    df[f'{t}_aggregated'] = False
    df[f'{t}_aggregated'] = df_count_targets[t]
    df[f'{t}_aggregated'] = df[f'{t}_aggregated'].fillna(False)
    df = df.reset_index()

targets_aggregated = [f'{t}_aggregated' for t in targets]

# For each row, collect the names of the aggregated-target columns that are
# True (a pandas Index of column names).
df['target'] = df[targets_aggregated].apply(lambda row: row[row].index, axis=1)

# Keep the target only when exactly one group was agreed on; zero or
# multiple agreed targets become None.
df['target'] = df['target'].apply(lambda x: x[0] if len(x) == 1 else None)

# Collapse to one row per comment — aggregated columns are now identical
# across a comment's annotator rows, so the first row suffices.
df = df.groupby('comment_id').nth(0)
df = df.reset_index()

# ---- Build the final gold label ----------------------------------------
idx_multiclass = df[df['label'] == 1].index
idx_not_hate = df[df['label'] == 0].index

# Hateful comments take their agreed target-group name; non-hateful ones
# take 'not_hate'. Hateful comments with no single agreed target stay None
# and are dropped.
df['gold_label'] = None
df.loc[idx_not_hate, 'gold_label'] = 'not_hate'
df.loc[idx_multiclass, 'gold_label'] = df.loc[idx_multiclass]['target']

df = df.dropna(subset='gold_label')

# ---- Text normalisation -------------------------------------------------
# Verified usernames consumed by clean_text() to decide which @-mentions to
# keep verbatim; must be populated before clean_text is applied.
verified_users = urllib.request.urlopen('https://raw.githubusercontent.com/cardiffnlp/timelms/main/data/verified_users.v091122.txt').readlines()
verified_users = [x.decode().strip('\n').lower() for x in verified_users]

df['text'] = df['text'].apply(clean_text)

# String class names -> integer ids (see class_mapping above).
df['gold_label'] = df['gold_label'].map(class_mapping)

# ---- Stratified train/val/test split and export ------------------------
# 20% of the data goes to test; 10% (of the full set) is then carved out of
# the remaining train portion for validation.
test_size = int(0.2 * len(df))
val_size = int(0.1 * len(df))

train, test = train_test_split(df, test_size=test_size, stratify=df['gold_label'].values, random_state=4)
train, val = train_test_split(train, test_size=val_size, stratify=train['gold_label'].values, random_state=4)

cols_to_keep = ['gold_label', 'text']
train[cols_to_keep].to_json('../data/tweet_hate/train.jsonl', lines=True, orient='records')
val[cols_to_keep].to_json('../data/tweet_hate/validation.jsonl', lines=True, orient='records')
test[cols_to_keep].to_json('../data/tweet_hate/test.jsonl', lines=True, orient='records')