import json
import sys
import numpy as np
import jieba
import nltk
from nltk.translate.bleu_score import sentence_bleu
from nltk import ngrams

def bleu(data):
    """
    Compute sentence-level and corpus-level BLEU scores.
    Args:
        data (list of dict): each dict holds a "reference" and a "candidate" string
    Returns:
        res (dict): BLEU-1 to BLEU-4 scores, both sentence-level (averaged) and corpus-level
    """

    res = {}
    for i in range(1, 5):
        res["sentence-bleu-%d" % i] = []
        # corpus-level BLEU-i over the whole dataset, with uniform n-gram weights
        res["corpus-bleu-%d" % i] = nltk.translate.bleu_score.corpus_bleu(
            [[d["reference"].strip().split()] for d in data],
            [d["candidate"].strip().split() for d in data],
            weights=tuple(1. / i for _ in range(i)))
    for tmp_data in data:
        origin_candidate = tmp_data['candidate']
        origin_reference = tmp_data['reference']
        assert isinstance(origin_candidate, str)
        if not isinstance(origin_reference, list):
            origin_reference = [origin_reference]

        for i in range(1, 5):
            # sentence-level BLEU-i against all references for this sample
            res["sentence-bleu-%d" % i].append(sentence_bleu(
                references=[r.strip().split() for r in origin_reference],
                hypothesis=origin_candidate.strip().split(),
                weights=tuple(1. / i for _ in range(i))))

    # average the per-sample scores over the whole dataset
    for key in res:
        if "sentence" in key:
            res[key] = np.mean(res[key])

    return res



def distinct(eval_data):
    """Compute distinct-1 to distinct-4: the ratio of unique n-grams to all n-grams in the candidates."""
    result = {}
    for i in range(1, 5):
        all_ngram, all_ngram_num = {}, 0.
        for k, tmp_data in enumerate(eval_data):
            ngs = ["_".join(c) for c in ngrams(tmp_data["candidate"].strip().split(), i)]
            all_ngram_num += len(ngs)
            for s in ngs:
                if s in all_ngram:
                    all_ngram[s] += 1
                else:
                    all_ngram[s] = 1
        result["distinct-%d" % i] = len(all_ngram) / float(all_ngram_num)
    return result
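

# A minimal sketch (toy strings, not benchmark data) of the input that bleu() and
# distinct() expect: a list of dicts whose "reference" and "candidate" values are
# whitespace-tokenized strings, as produced by proline() below.
#
#   example = [
#       {"reference": "the cat sat on the mat", "candidate": "the cat sat on a mat"},
#       {"reference": "a quick brown fox", "candidate": "a quick brown fox"},
#   ]
#   print(bleu(example))      # sentence- and corpus-level BLEU-1..4
#   print(distinct(example))  # distinct-1..4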



def load_file(filename):
    """Load a JSON-lines file: one JSON object per line."""
    data = []
    with open(filename, "r") as f:
        for line in f:
            data.append(json.loads(line))
    return data

def proline(line):
    """Strip existing whitespace, re-segment the Chinese text with jieba, and join tokens with spaces."""
    return " ".join(jieba.cut("".join(line.strip().split())))


def compute(golden_file, pred_file, return_dict=True):
    golden_data = load_file(golden_file)
    pred_data = load_file(pred_file)

    if len(golden_data) != len(pred_data):
        raise RuntimeError("The prediction file must have the same number of lines as the golden file.")

    eval_data = [{"reference": proline(g["plot"]), "candidate": proline(p["plot"])}
                 for g, p in zip(golden_data, pred_data)]
    res = bleu(eval_data)
    res.update(distinct(eval_data))
    # cast numpy scalars to plain Python floats before returning
    return {key: float(value) for key, value in res.items()}

def main():
    argv = sys.argv
    print("Prediction file: {}, golden file: {}".format(argv[1], argv[2]))
    print(compute(argv[2], argv[1]))


if __name__ == '__main__':
    main()
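

# Usage sketch (the file names below are illustrative, not part of this script):
# each line of both the golden file and the prediction file is a JSON object with
# a "plot" field holding the Chinese text to be compared, e.g.
#
#   {"plot": "从前有一座山，山里有一座庙。"}
#
# The script is invoked with the prediction file first, then the golden file:
#
#   python eval.py pred.jsonl golden.jsonl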