import json
import argparse
import sys

import numpy as np
import jieba
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk import ngrams


def load_file(filename):
    """Load a JSONL file: one JSON object per line."""
    data = []
    with open(filename, "r") as f:
        for line in f:
            data.append(json.loads(line))
    return data


def proline(line):
    """Re-segment a line with jieba and join the tokens with spaces."""
    return " ".join(w for w in jieba.cut("".join(line.strip().split())))


def compute(golden_file, pred_file, return_dict=True):
    """Compare the "label" field of each prediction against the golden file
    and return classification accuracy."""
    golden_data = load_file(golden_file)
    pred_data = load_file(pred_file)
    if len(golden_data) != len(pred_data):
        raise RuntimeError("Wrong number of predictions: expected {}, got {}".format(
            len(golden_data), len(pred_data)))

    num = 0
    for g, p in zip(golden_data, pred_data):
        # Labels may be stored as strings or ints; normalize both sides to int.
        if isinstance(g["label"], str):
            gold_label = int(g["label"].strip())
        elif isinstance(g["label"], int):
            gold_label = g["label"]
        else:
            raise TypeError("Golden label has unsupported type: {}".format(type(g["label"])))
        if isinstance(p["label"], str):
            pred_label = int(p["label"].strip())
        elif isinstance(p["label"], int):
            pred_label = p["label"]
        else:
            raise TypeError("Predicted label has unsupported type: {}".format(type(p["label"])))
        if gold_label == pred_label:
            num += 1
    return {"accuracy": float(num) / len(golden_data)}


def main():
    argv = sys.argv
    print("Predictions: {}, test set: {}".format(argv[1], argv[2]))
    print(compute(argv[2], argv[1]))


if __name__ == "__main__":
    main()
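
# --- Usage sketch (illustrative, not part of the original script) ---
# A minimal check of compute(), assuming this file is saved as
# "eval_accuracy.py" (hypothetical name) and that each line of both JSONL
# files carries a {"label": ...} object, which is what compute() expects.
#
#   import json, os, tempfile
#   from eval_accuracy import compute
#
#   golden = [{"label": 1}, {"label": 0}]
#   pred = [{"label": "1"}, {"label": 1}]
#   with tempfile.TemporaryDirectory() as d:
#       g_path = os.path.join(d, "golden.jsonl")
#       p_path = os.path.join(d, "pred.jsonl")
#       for path, rows in ((g_path, golden), (p_path, pred)):
#           with open(path, "w") as f:
#               f.write("\n".join(json.dumps(r) for r in rows) + "\n")
#       print(compute(g_path, p_path))  # -> {'accuracy': 0.5}
#
# Command-line use mirrors main(): the prediction file comes first, then the
# test set, e.g. `python eval_accuracy.py pred.jsonl golden.jsonl`.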