Jiann committed
Commit f5a0329 · verified · 1 Parent(s): 86ecd3d

Upload 6 files

outgen/.DS_Store ADDED
Binary file (6.15 kB).
 
outgen/README.md ADDED
@@ -0,0 +1,39 @@
+ # Outline-Conditioned Generation Dataset
+
+ ### Data Example
+
+ ```json
+ {
+   "story": "有个人把神像放在驴子背上,赶着进城。凡是遇见他们的人都对着神像顶礼膜拜。驴子以为人们是向它致敬,便洋洋得意,大喊大叫,再也不肯往前走了。结果挨了驴夫狠狠的一棍。",
+   "outline": ["对着神像顶礼膜拜", "再也不肯往前走", "神像放在驴子", "赶着进城", "驴夫狠狠", "洋洋得意", "大喊大叫", "遇见"],
+   "title": "运神像的驴子"
+ }
+ ```
+
+ - "title" (`str`): input story title
+ - "outline" (`list of str`): input story outline (an out-of-order list of phrases)
+ - "story" (`str`): the target story
+
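+ Each line of `test.jsonl` (and, by the same layout, `train.jsonl` and `valid.jsonl`) is one such JSON object. A minimal loading sketch, with the file path assumed from this commit's `outgen/` layout:
+
+ ```python
+ import json
+
+ # Read the test split line by line; each line is one JSON object.
+ with open("outgen/test.jsonl", encoding="utf-8") as f:
+     for line in f:
+         example = json.loads(line)
+         title = example["title"]      # str: story title
+         outline = example["outline"]  # list of str: shuffled outline phrases
+         story = example["story"]      # str: the target story
+ ```
+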
+ ### Evaluation
+
+ The prediction result should have the same format as `test.jsonl`:
+
+ ```shell
+ python eval.py prediction_file test.jsonl
+ ```
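+
+ Concretely, `eval.py` parses each line of the prediction file as JSON and reads only its `"story"` field, so a valid prediction file can be written as sketched below (`predictions` is a placeholder for your model outputs, one per test example, in test-set order):
+
+ ```python
+ import json
+
+ predictions = ["第一个生成的故事……", "第二个生成的故事……"]  # placeholder outputs
+
+ with open("prediction_file", "w", encoding="utf-8") as f:
+     for story in predictions:
+         # eval.py only uses the "story" field of each prediction line
+         f.write(json.dumps({"story": story}, ensure_ascii=False) + "\n")
+ ```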
+
+ We use BLEU, distinct, coverage, and order as the evaluation metrics. The output of the script `eval.py` is a dictionary as follows:
+
+ ```python
+ {'bleu-1': '_', 'bleu-2': '_', 'bleu-3': '_', 'bleu-4': '_', 'distinct-1': '_', 'distinct-2': '_', 'distinct-3': '_', 'distinct-4': '_', 'coverage': '_', 'order': '_'}
+ ```
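+
+ Here `coverage` is the ROUGE-L recall of each outline phrase against the generated story, and `order` rewards preserving the gold ordering of outline phrases: it is one minus a normalized inversion count of the recovered phrase ranks (see `inversenum` in `eval.py` below). A self-contained sketch of that inversion statistic:
+
+ ```python
+ def inverse_fraction(ranks):
+     # Fraction of index pairs (i, j), i <= j, that are inverted (ranks[i] > ranks[j]),
+     # mirroring inversenum() in eval.py; the pair count includes i == j.
+     inversions, pairs = 0, 0
+     for i in range(len(ranks)):
+         for j in range(i, len(ranks)):
+             if ranks[i] > ranks[j]:
+                 inversions += 1
+             pairs += 1
+     return inversions / float(pairs)
+
+ print(1 - inverse_fraction([0, 2, 1, 3]))  # order score for one story: 0.9
+ ```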
+
+ - Dependencies: rouge==1.0.0, jieba==0.42.1, nltk==3.6.2, numpy==1.20.3
outgen/eval.py ADDED
@@ -0,0 +1,274 @@
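+ # Evaluation script for outline-conditioned generation.
+ # Usage: python eval.py prediction_file test.jsonl
+ # Reports corpus/sentence BLEU, distinct-n, coverage (ROUGE-L recall of
+ # outline phrases in the generated story), and order (1 - normalized
+ # inversion count of the recovered outline-phrase positions).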
+ import json
+ import argparse
+ import sys
+ import numpy as np
+ import jieba
+ import nltk
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+ from nltk import ngrams
+ from rouge import Rouge
+
+
+ def bleu(data):
+     """
+     compute bleu score
+     Args:
+         data (list of dict): each dict holds a "reference" and a "candidate" string
+     Returns:
+         res (dict): corpus-level and averaged sentence-level bleu scores
+     """
+     res = {}
+     for i in range(1, 5):
+         res["sentence-bleu-%d" % i] = []
+         res["corpus-bleu-%d" % i] = nltk.translate.bleu_score.corpus_bleu(
+             [[d["reference"].strip().split()] for d in data],
+             [d["candidate"].strip().split() for d in data],
+             weights=tuple([1. / i for j in range(i)]))
+     for tmp_data in data:
+         origin_candidate = tmp_data['candidate']
+         origin_reference = tmp_data['reference']
+         assert isinstance(origin_candidate, str)
+         if not isinstance(origin_reference, list):
+             origin_reference = [origin_reference]
+
+         for i in range(1, 5):
+             res["sentence-bleu-%d" % i].append(sentence_bleu(
+                 references=[r.strip().split() for r in origin_reference],
+                 hypothesis=origin_candidate.strip().split(),
+                 weights=tuple([1. / i for j in range(i)])))
+
+     # average the per-sentence scores
+     for key in res:
+         if "sentence" in key:
+             res[key] = np.mean(res[key])
+
+     return res
+
+
+ def repetition_distinct(eval_data):
+     # distinct-n: number of unique n-grams divided by the total n-gram count
+     result = {}
+     for i in range(1, 5):
+         all_ngram, all_ngram_num = {}, 0.
+         for k, tmp_data in enumerate(eval_data):
+             ngs = ["_".join(c) for c in ngrams(tmp_data["candidate"].strip().split(), i)]
+             all_ngram_num += len(ngs)
+             for s in ngs:
+                 if s in all_ngram:
+                     all_ngram[s] += 1
+                 else:
+                     all_ngram[s] = 1
+         result["distinct-%d" % i] = len(all_ngram) / float(all_ngram_num)
+     return result
+
+
+ def rouge(ipt, cand):
+     # coverage: average ROUGE-L recall of each outline phrase vs. the candidate story
+     rouge_name = ["rouge-1", "rouge-2", "rouge-l"]
+     item_name = ["f", "p", "r"]
+
+     res = {}
+     for name1 in rouge_name:
+         for name2 in item_name:
+             res["%s-%s" % (name1, name2)] = []
+     for k, (tmp_ipt, tmp_cand) in enumerate(zip(ipt, cand)):
+         for tmp_ref in tmp_ipt.split("#"):
+             # split both the phrase and the story into space-separated characters
+             tmp_ref = " ".join([w for w in "".join(tmp_ref.strip().split())])
+             tmp_hyp = " ".join([w for w in "".join(tmp_cand.strip().split())])
+             try:
+                 tmp_res = Rouge().get_scores(refs=tmp_ref, hyps=tmp_hyp)[0]
+                 for name1 in rouge_name:
+                     for name2 in item_name:
+                         res["%s-%s" % (name1, name2)].append(tmp_res[name1][name2])
+             except Exception:
+                 # skip pairs the scorer cannot handle (e.g. empty strings)
+                 continue
+     for name1 in rouge_name:
+         for name2 in item_name:
+             res["%s-%s" % (name1, name2)] = np.mean(res["%s-%s" % (name1, name2)])
+     return {"coverage": res["rouge-l-r"]}
+
+
+ def LCS(x, y):
+     """
+     Computes the length of the longest common subsequence (lcs) between two
+     strings. The implementation below uses a dynamic programming algorithm and
+     runs in O(nm) time where n = len(x) and m = len(y).
+     Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
+     Args:
+         x: collection of words
+         y: collection of words
+     Returns:
+         Table of dictionary of coord and len lcs
+     """
+     n, m = len(x), len(y)
+     table = dict()
+     for i in range(n + 1):
+         for j in range(m + 1):
+             if i == 0 or j == 0:
+                 table[i, j] = 0
+             elif x[i - 1] == y[j - 1]:
+                 table[i, j] = table[i - 1, j - 1] + 1
+             else:
+                 table[i, j] = max(table[i - 1, j], table[i, j - 1])
+     return table
+
+
+ def Recon_LCS(x, y, exclusive=True):
+     """
+     Returns the Longest Common Subsequence between x and y.
+     Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
+     Args:
+         x: sequence of words
+         y: sequence of words
+     Returns:
+         sequence: LCS of x and y
+     """
+     i, j = len(x), len(y)
+     table = LCS(x, y)
+
+     def _recon(i, j):
+         """private recon calculation"""
+         if i == 0 or j == 0:
+             return []
+         elif x[i - 1] == y[j - 1]:
+             return _recon(i - 1, j - 1) + [(x[i - 1], i)]
+         elif table[i - 1, j] > table[i, j - 1]:
+             return _recon(i - 1, j)
+         else:
+             return _recon(i, j - 1)
+
+     recon_list = list(map(lambda x: x[0], _recon(i, j)))
+     if len(recon_list):
+         return "".join(recon_list).strip()
+     else:
+         return ""
+
+
+ def lcs3_dp(input_x, input_y):
+     # longest common substring via DP; input_y as column, input_x as row
+     dp = [([0] * (len(input_y) + 1)) for i in range(len(input_x) + 1)]
+     maxlen = maxindex = 0
+     for i in range(1, len(input_x) + 1):
+         for j in range(1, len(input_y) + 1):
+             if input_x[i - 1] == input_y[j - 1]:
+                 dp[i][j] = dp[i - 1][j - 1] + 1
+                 if dp[i][j] > maxlen:
+                     # track the longest match and its start position as we go
+                     maxlen = dp[i][j]
+                     maxindex = i - maxlen
+             else:
+                 dp[i][j] = 0
+     return input_x[maxindex:maxindex + maxlen]
+
+
+ def inversenum(a):
+     # fraction of index pairs (i, j), i <= j, that are inverted, i.e. a[i] > a[j]
+     num = 0
+     all_num = 0
+     for i in range(0, len(a)):
+         for j in range(i, len(a)):
+             if a[i] > a[j]:
+                 num += 1
+             all_num += 1
+     return num / float(all_num)
+
+
+ def find_all(sub, s):
+     # all start indices of substring sub in s, or -1 if absent
+     index_list = []
+     index = s.find(sub)
+     while index != -1:
+         index_list.append(index)
+         index = s.find(sub, index + 1)
+
+     if len(index_list) > 0:
+         return index_list
+     else:
+         return -1
+
+
+ def order(ipt, cand, kw2id):
+     num = []
+     for k, (tmp_ipt, tmp_cand, tmp_kw2id) in enumerate(zip(ipt, cand, kw2id)):
+         pos = []
+         kw_list = list(tmp_kw2id.keys())
+         kw_list.reverse()
+
+         # locate each outline phrase in the candidate via its longest common substring
+         for tmp_ref in kw_list:
+             tmp_ref = "".join(tmp_ref.strip().split())
+             tmp_hyp = "".join(tmp_cand.strip().split())
+             lcs = lcs3_dp(tmp_ref, tmp_hyp)
+             if len(lcs) > 1:
+                 pos.append(tmp_hyp.find(lcs))
+             else:
+                 pos.append(-1)
+         idlist = list(range(len(pos)))
+         orderlist = sorted(idlist, key=lambda x: pos[x])
+
+         # map the candidate's phrase order back to gold ranks and count inversions
+         new_rank = [-1 for _ in idlist]
+         for idl, ord in zip(idlist, orderlist):
+             new_rank[idl] = tmp_kw2id[kw_list[ord]]
+         num.append(1 - inversenum(new_rank))
+
+     return {"order": np.mean(num)}
+
+
+ def load_file(filename, pred=False):
+     data = []
+     with open(filename, "r") as f:
+         for line in f.readlines():
+             if pred:
+                 data.append({"story": line.strip()})
+             else:
+                 data.append(json.loads(line))
+     return data
+
+
+ def proline(line):
+     # re-segment a line with jieba and join the tokens with single spaces
+     return " ".join([w for w in jieba.cut("".join(line.strip().split()))])
+
+
+ def compute(golden_file, pred_file, return_dict=True):
+     golden_data = load_file(golden_file)
+     pred_data = load_file(pred_file)
+
+     if len(golden_data) != len(pred_data):
+         raise RuntimeError("Wrong Predictions")
+
+     ipt = ["#".join(g["outline"]) for g in golden_data]
+     truth = [g["story"] for g in golden_data]
+     pred = [p["story"] for p in pred_data]
+
+     # recover each outline phrase's gold rank from its position in the reference story
+     kw2id = []
+     for i1, t1 in zip(ipt, truth):
+         kw_list = i1.strip().split("#")
+         pos = [t1.strip().find(kw.strip()) for kw in kw_list]
+
+         idlist = list(range(len(pos)))
+         orderlist = sorted(idlist, key=lambda x: pos[x])
+         kw2id.append({})
+         for idl, ord in zip(idlist, orderlist):
+             kw2id[-1][kw_list[ord]] = idl
+
+     eval_data = [{"reference": proline(g["story"]), "candidate": proline(p["story"])}
+                  for g, p in zip(golden_data, pred_data)]
+     res = bleu(eval_data)
+     res.update(repetition_distinct(eval_data))
+     res.update(rouge(ipt=ipt, cand=pred))
+     res.update(order(ipt=ipt, cand=pred, kw2id=kw2id))
+
+     return res
+
+
+ def main():
+     argv = sys.argv
+     print("Prediction file: {}, test set: {}".format(argv[1], argv[2]))
+     print(compute(argv[2], argv[1]))
+
+
+ if __name__ == '__main__':
+     main()
outgen/test.jsonl ADDED
The diff for this file is too large to render.
 
outgen/train.jsonl ADDED
The diff for this file is too large to render.
 
outgen/valid.jsonl ADDED
The diff for this file is too large to render.