upload
- README.md +7 -0
- prepare_data.py +20 -0
- test.jsonl +3 -0
- train.jsonl +3 -0
README.md
ADDED
@@ -0,0 +1,7 @@
+# Amazon Counterfactual Statements
+
+This dataset is the *en-ext* split from [SetFit/amazon_counterfactual](https://huggingface.co/datasets/SetFit/amazon_counterfactual). Because the original test set is rather small (1333 examples), the data was re-split 50/50 into training and test sets.
+
+The dataset is described in [amazon-multilingual-counterfactual-dataset](https://github.com/amazon-research/amazon-multilingual-counterfactual-dataset) ([paper](https://arxiv.org/pdf/2104.06893.pdf)).
+
+It contains statements from Amazon reviews about events that did not or cannot take place.
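A minimal loading sketch for the two files in this commit, assuming each row keeps the fields of the SetFit source dataset (`text`, `label`, `label_text`):

```python
from datasets import load_dataset

# Load the re-split JSONL files produced by prepare_data.py (below).
dataset = load_dataset(
    "json",
    data_files={"train": "train.jsonl", "test": "test.jsonl"},
)

# Field names are an assumption carried over from SetFit/amazon_counterfactual.
print(dataset["test"][0])  # e.g. {'text': ..., 'label': ..., 'label_text': ...}
```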
prepare_data.py
ADDED
@@ -0,0 +1,20 @@
+from datasets import load_dataset
+import json
+import random
+
+dataset = load_dataset("SetFit/amazon_counterfactual", "en-ext")
+
+# Merge all original splits, then shuffle deterministically
+examples = list(dataset['train']) + list(dataset['validation']) + list(dataset['test'])
+random.seed(42)
+random.shuffle(examples)
+
+# Re-split: the first 5000 shuffled examples form the test set
+num_test = 5000
+data_splits = {'test': examples[:num_test], 'train': examples[num_test:]}
+
+# Write each split as JSON Lines, one example per line
+for split, rows in data_splits.items():
+    with open(f'{split}.jsonl', 'w') as fOut:
+        for row in rows:
+            fOut.write(json.dumps(row) + "\n")
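The fixed seed makes the shuffle, and therefore the split, reproducible. Since `num_test = 5000` yields the roughly 50/50 split the README describes, the merged *en-ext* splits evidently total about 10,000 examples, which the near-identical train and test file sizes below also suggest.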
test.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a8d0d50f925f984a9ae51a42e913178387825bd50cf36372077cfacb1d7b3aa
+size 815592
train.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbbf41050834d7ca88a9edc47f9035e06d0b72b9bf81263e84ea4c9af7b84860
+size 814811
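Both `.jsonl` entries above are Git LFS pointers rather than the data itself; the `oid` line records the SHA-256 digest of the real file. A minimal integrity-check sketch, assuming the actual files have been fetched (e.g. with `git lfs pull`):

```python
import hashlib

# Expected digest, copied from the LFS pointer for train.jsonl above.
EXPECTED = "bbbf41050834d7ca88a9edc47f9035e06d0b72b9bf81263e84ea4c9af7b84860"

# Hash the file in 1 MiB chunks to avoid loading it all into memory.
sha = hashlib.sha256()
with open("train.jsonl", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "train.jsonl does not match its LFS pointer"
```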