import json
import argparse
import sys

import numpy as np
import jieba
import nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk import ngrams


def bleu(data):
    """
    Compute BLEU scores.

    Args:
        data: list of dicts, each with a "reference" and a "candidate" string
              (whitespace-tokenised).

    Returns:
        res: dict mapping "sentence-bleu-n" (mean over examples) and
             "corpus-bleu-n" to scores for n = 1..4.
    """
    res = {}
    for i in range(1, 5):
        res["sentence-bleu-%d" % i] = []
        # corpus-level BLEU-n with uniform weights over 1..n-grams
        res["corpus-bleu-%d" % i] = nltk.translate.bleu_score.corpus_bleu(
            [[d["reference"].strip().split()] for d in data],
            [d["candidate"].strip().split() for d in data],
            weights=tuple(1. / i for _ in range(i)))

    for tmp_data in data:
        origin_candidate = tmp_data["candidate"]
        origin_reference = tmp_data["reference"]
        assert isinstance(origin_candidate, str)
        if not isinstance(origin_reference, list):
            origin_reference = [origin_reference]

        # sentence-level BLEU-n for each example
        for i in range(1, 5):
            res["sentence-bleu-%d" % i].append(sentence_bleu(
                references=[r.strip().split() for r in origin_reference],
                hypothesis=origin_candidate.strip().split(),
                weights=tuple(1. / i for _ in range(i))))

    # average the per-sentence scores
    for key in res:
        if "sentence" in key:
            res[key] = np.mean(res[key])

    return res
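# Illustrative sketch (not part of the original script): bleu() expects the
# whitespace-tokenised strings produced by proline() below, e.g.
#
#   toy = [{"reference": "他 养 了 一只 猫", "candidate": "他 养 了 一只 狗"}]
#   scores = bleu(toy)
#
# scores then maps "sentence-bleu-1" .. "sentence-bleu-4" (averaged over
# examples) and "corpus-bleu-1" .. "corpus-bleu-4" (one corpus-level value
# each) to floats in [0, 1].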


def distinct(eval_data):
    """
    Compute distinct-n (n = 1..4): the number of unique n-grams in the
    candidates divided by the total number of candidate n-grams.
    """
    result = {}
    for i in range(1, 5):
        all_ngram, all_ngram_num = {}, 0.
        for tmp_data in eval_data:
            ngs = ["_".join(c) for c in ngrams(tmp_data["candidate"].strip().split(), i)]
            all_ngram_num += len(ngs)
            # count each n-gram so that unique n-grams can be tallied
            for s in ngs:
                if s in all_ngram:
                    all_ngram[s] += 1
                else:
                    all_ngram[s] = 1
        result["distinct-%d" % i] = len(all_ngram) / float(all_ngram_num)
    return result
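# Worked example (illustrative): for a single candidate "我 喜欢 喜欢 猫",
# the unigrams are ["我", "喜欢", "喜欢", "猫"], of which 3 are unique, so
# distinct-1 = 3 / 4 = 0.75; higher values indicate less repetitive output.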


def load_file(filename):
    """Load a JSON-lines file: one JSON object per line."""
    data = []
    with open(filename, "r") as f:
        for line in f:
            data.append(json.loads(line))
    return data
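# Expected input format (sketch): both the golden file and the prediction file
# are JSON lines, and compute() below reads the story text from a "plot"
# field, e.g.
#
#   {"plot": "从前有一只猫。"}
#
# Any additional fields on a line are ignored by this script.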


def proline(line):
    """Remove existing whitespace, re-segment with jieba, and join the tokens with spaces."""
    return " ".join([w for w in jieba.cut("".join(line.strip().split()))])


def compute(golden_file, pred_file, return_dict=True):
    """Compute BLEU and distinct-n scores for predictions against the golden file."""
    golden_data = load_file(golden_file)
    pred_data = load_file(pred_file)

    if len(golden_data) != len(pred_data):
        raise RuntimeError("Wrong Predictions")

    # re-tokenise both sides with jieba so references and candidates share one segmentation
    eval_data = [{"reference": proline(g["plot"]), "candidate": proline(p["plot"])}
                 for g, p in zip(golden_data, pred_data)]
    res = bleu(eval_data)
    res.update(distinct(eval_data))
    return res
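# Usage sketch (hypothetical file names):
#   scores = compute("test.jsonl", "pred.jsonl")
#   # e.g. {"sentence-bleu-1": ..., "corpus-bleu-4": ..., "distinct-4": ...}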


def main():
    argv = sys.argv
    print("Predictions: {}, test set: {}".format(argv[1], argv[2]))
    print(compute(argv[2], argv[1]))


if __name__ == '__main__':
    main()