Ziyuan111 commited on
Commit
58b6351
·
verified ·
1 Parent(s): 8191da2

Upload sarcasm.py

Browse files
Files changed (1) hide show
  1. sarcasm.py +94 -0
sarcasm.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Sarcasm
3
+
4
+ Automatically generated by Colaboratory.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/15_wDQ9RJXwyxbomu2F1k0pK9H7XZ1cuT
8
+ """
9
+ import geopandas
10
+ import matplotlib.pyplot as plt
11
+ import seaborn as sns
12
+ from shapely.geometry import Point
13
+ import pandas as pd
14
+ import geopandas as gpd
15
+ from datasets import (
16
+ GeneratorBasedBuilder, Version, DownloadManager, SplitGenerator, Split,
17
+ Features, Value, BuilderConfig, DatasetInfo
18
+ )
19
+ import matplotlib.pyplot as plt
20
+ import seaborn as sns
21
+ import csv
22
+ import json
23
+ from shapely.geometry import Point
24
+
25
+ # URL definitions
26
+ _URLS = {
27
+ "csv_file": "https://drive.google.com/uc?export=download&id=1WcPqVZasDy1nmGcildLS-uw_-04I9Max",
28
+ }
29
+
30
class Sarcasm(GeneratorBasedBuilder):
    """Dataset builder for sarcasm-labelled comments.

    Downloads a single CSV (see module-level ``_URLS``) and exposes it as
    train/validation/test splits in a 60/20/20 row-order partition.

    Features:
        comments (string): raw comment text.
        contains_slash_s (int64): label flag (presence of the "/s" sarcasm
            marker, judging by the column name -- TODO confirm against the CSV).
    """

    VERSION = Version("1.0.0")

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return DatasetInfo(
            description="This dataset combines information from sarcasm",
            features=Features({
                "comments": Value("string"),
                "contains_slash_s": Value("int64"),
            }),
            supervised_keys=None,
            homepage="https://github.com/AuraMa111?tab=repositories",
            citation="Citation for the combined dataset",
        )

    def _split_generators(self, dl_manager):
        """Download the CSV and define contiguous 60/20/20 row splits.

        BUG FIX: the original passed only each split's *size* and then sliced
        ``data[size:size*2]`` (validation) and ``data[size*2:]`` (test), i.e.
        each split was offset by its own length rather than by the preceding
        splits' lengths — validation overlapped train and rows were dropped.
        We now pass explicit [start, end) bounds instead.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        data_file_path = downloaded_files["csv_file"]

        num_examples = pd.read_csv(data_file_path).shape[0]
        train_end = int(0.6 * num_examples)            # rows [0, train_end) -> train
        val_end = train_end + int(0.2 * num_examples)  # rows [train_end, val_end) -> validation

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_file_path": data_file_path, "start": 0, "end": train_end},
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={"data_file_path": data_file_path, "start": train_end, "end": val_end},
            ),
            SplitGenerator(
                name=Split.TEST,
                # end=None -> slice to the last row, so no example is dropped.
                gen_kwargs={"data_file_path": data_file_path, "start": val_end, "end": None},
            ),
        ]

    def _generate_examples(self, data_file_path, start, end):
        """Yield ``(row_index, example)`` pairs for CSV rows in [start, end).

        Args:
            data_file_path: local path of the downloaded CSV.
            start: first row index (inclusive) of this split.
            end: last row index (exclusive), or None for "until the end".
        """
        data = pd.read_csv(data_file_path)
        subset_data = data.iloc[start:end]
        for index, row in subset_data.iterrows():
            yield index, {
                "comments": row["comments"],
                "contains_slash_s": row["contains_slash_s"],
            }
84
+
85
# Building the dataset is an expensive side effect (downloads the CSV and
# writes Arrow files), so run it only when this file is executed directly.
# Importing the module — as the `datasets` loading machinery does with a
# loader script — must stay side-effect free.
if __name__ == "__main__":
    # Instantiate the builder and materialize all splits locally.
    sarcasm = Sarcasm()
    sarcasm.download_and_prepare()

    # Access the datasets for training, validation, and testing.
    dataset_train = sarcasm.as_dataset(split='train')
    dataset_validation = sarcasm.as_dataset(split='validation')
    dataset_test = sarcasm.as_dataset(split='test')