Upload 6 files
- .gitattributes +1 -0
- README.md +30 -0
- senpos/README.md +37 -0
- senpos/eval.py +57 -0
- senpos/test.jsonl +0 -0
- senpos/train.jsonl +3 -0
- senpos/val.jsonl +0 -0
.gitattributes
CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+senpos/train.jsonl filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,30 @@
+---
+configs:
+- config_name: default
+  data_files:
+  - split: clozet_train
+    path: clozet/train*
+  - split: clozet_val
+    path: clozet/val*
+  - split: clozet_test
+    path: clozet/test*
+  - split: senpos_train
+    path: senpos/train*
+  - split: senpos_val
+    path: senpos/val*
+  - split: senpos_test
+    path: senpos/test*
+  - split: plotcom_train
+    path: plotcom/train*
+  - split: plotcom_val
+    path: plotcom/val*
+  - split: plotcom_test
+    path: plotcom/test*
+  - split: outgen_train
+    path: outgen/train*
+  - split: outgen_val
+    path: outgen/val*
+  - split: outgen_test
+    path: outgen/test*
+
+---
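For reference (not part of the uploaded files): a minimal sketch of loading one of the splits declared above with the `datasets` library. The repository id `user/repo` is a placeholder, not the actual id of this dataset.

```python
from datasets import load_dataset

# "user/repo" is a placeholder for the Hub id of this dataset repository.
senpos_train = load_dataset("user/repo", split="senpos_train")
print(senpos_train[0])  # a senpos example has "story", "sentence" and "label" fields
```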
senpos/README.md
ADDED
@@ -0,0 +1,37 @@
+# Sentence Position Prediction Dataset
+
+### Data Example
+
+```
+{
+  "story": "为了证明自己看见了这一切,路过银树林时,守望星折了一根小树枝。[MASK]丽娜听到树枝折断的声音时,回头问:“什么声音?”[MASK]“什么声音也没有。”[MASK]她的大姐说,“可能是哪座城堡的塔楼里,猫头鹰在叫唤。”[MASK]她讲话的时候,迈克悄悄地溜到前头,上了楼梯,第一个进了公主们的房间。[MASK]他推开窗户,顺着藤条滑了下去。[MASK]到花园的时候,太阳刚刚开始升起,他要开始工作了。[MASK]这一天,迈克捆扎鲜花的时候,故意把那根银色的树枝扎进了献给小公主的花里。[MASK]不过,她没有告诉姐姐们。",
+  "sentence": "丽娜发现银树枝时,吃惊极了。",
+  "label": 8
+}
+```
+
+- "story" (`str`): the input story; each `[MASK]` marks a candidate position
+- "sentence" (`str`): the removed sentence
+- "label" (`int`): label = $l$ means the $l$-th position is the correct one
+
+### Evaluation
+
+The prediction file should have the same format as `test.jsonl`:
+
+```shell
+python eval.py prediction_file test.jsonl
+```
+
+We use accuracy as the evaluation metric. The output of the script `eval.py` is a dictionary as follows:
+
+```python
+{"accuracy": _}
+```
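As a rough illustration (not part of the uploaded files), the sketch below writes a `prediction.jsonl` in the same one-JSON-object-per-line format as `test.jsonl`, using a trivial baseline that always predicts position 0; the local file paths are assumptions.

```python
import json

# Hypothetical local paths; adjust to where the dataset files live.
with open("senpos/test.jsonl", "r", encoding="utf-8") as fin, \
        open("prediction.jsonl", "w", encoding="utf-8") as fout:
    for line in fin:
        example = json.loads(line)
        example["label"] = 0  # trivial baseline: always pick the first [MASK] position
        fout.write(json.dumps(example, ensure_ascii=False) + "\n")
```

The resulting file could then be scored with `python eval.py prediction.jsonl test.jsonl`.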
senpos/eval.py
ADDED
@@ -0,0 +1,57 @@
+import json
+import argparse
+import sys
+import numpy as np
+import jieba
+from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+from nltk import ngrams
+
+# Note: numpy, jieba and the nltk imports are not needed for the accuracy
+# metric computed below.
+
+
+def load_file(filename):
+    # Read a JSON-lines file into a list of dicts.
+    data = []
+    with open(filename, "r") as f:
+        for line in f:
+            data.append(json.loads(line))
+    return data
+
+
+def proline(line):
+    # Tokenize a Chinese sentence with jieba; unused by this script.
+    return " ".join([w for w in jieba.cut("".join(line.strip().split()))])
+
+
+def compute(golden_file, pred_file, return_dict=True):
+    # Accuracy of predicted sentence positions against the gold labels.
+    golden_data = load_file(golden_file)
+    pred_data = load_file(pred_file)
+
+    if len(golden_data) != len(pred_data):
+        raise RuntimeError("Wrong Predictions")
+
+    num = 0
+    for g, p in zip(golden_data, pred_data):
+        # Gold label: accept either an int or a string of digits.
+        if isinstance(g["label"], str):
+            gold = int(g["label"].strip())
+        elif isinstance(g["label"], int):
+            gold = g["label"]
+        else:
+            raise Exception("Data type error")
+
+        # Predicted label: same handling.
+        if isinstance(p["label"], str):
+            pred = int(p["label"].strip())
+        elif isinstance(p["label"], int):
+            pred = p["label"]
+        else:
+            raise Exception("Data type error")
+
+        if gold == pred:
+            num += 1
+
+    return {'accuracy': float(num) / len(golden_data)}
+
+
+def main():
+    argv = sys.argv
+    print("Prediction file: {}, test set: {}".format(argv[1], argv[2]))
+    print(compute(argv[2], argv[1]))
+
+
+if __name__ == '__main__':
+    main()
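`compute` can also be called directly from Python rather than via the command line; a short usage sketch, assuming a `prediction.jsonl` produced as above sits next to `test.jsonl`:

```python
from eval import compute

# compute(golden_file, pred_file) returns a dict of the form {"accuracy": <float>}
result = compute("test.jsonl", "prediction.jsonl")
print(result)
```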
senpos/test.jsonl
ADDED
The diff for this file is too large to render.
senpos/train.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef2b2d8a7baac9680295329a666a1ca773495c6d8b195164564661c9c8ae08b4
+size 18810263
senpos/val.jsonl
ADDED
The diff for this file is too large to render.