dmahata committed on
Commit
14f9fa0
·
1 Parent(s): fca122e

Upload evaluate_gpt.py

Files changed (1)
  1. evaluate_gpt.py +181 -0
evaluate_gpt.py ADDED
@@ -0,0 +1,181 @@
+ # -*- coding: utf-8 -*-
+ # """evaluate.ipynb
+ #
+ # Automatically generated by Colaboratory.
+ #
+ # Original file is located at
+ #     https://colab.research.google.com/drive/1_WZN6_5mgwRgg484xzXMSwCXBQXfr8Vj
+ # """
+
+ # print("**************OUTPUT FILE PATH UPDATED FOR SEED 42 hinglish ******************")
+
+ import json
+ import argparse
+ import timeit
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ from torch.utils.data import Dataset, DataLoader
+
+ import transformers
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+ # the padding=/truncation= arguments used below require transformers >= 3.0
+ print('use transformers version =', transformers.__version__)
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ def add_special_tokens(tokenizer):
+     """Return the GPT-2 tokenizer after adding bos/eos/padding/separator tokens."""
+     special_tokens = {'bos_token': '<|startoftext|>', 'eos_token': '<|endoftext|>',
+                       'pad_token': '<|pad|>', 'sep_token': '<|summarize|>'}
+     tokenizer.add_special_tokens(special_tokens)
+     return tokenizer
+
+
+ class GPT21024Dataset(Dataset):
+     """Pairs of articles/dialogues (ctext) and reference summaries (text), encoded for GPT-2."""
+
+     def __init__(self, text, ctext, tokenizer, source_len, summ_len):
+         self.tokenizer = add_special_tokens(tokenizer)
+         self.source_len = source_len
+         self.summ_len = summ_len
+         self.text = text    # reference summaries (e.g. the 'summary-hinglish' column)
+         self.ctext = ctext  # dialogues/articles to be summarized (e.g. the 'dialogue-hinglish' column)
+
+     def __len__(self):
+         return len(self.ctext)
+
+     def __getitem__(self, index):
+         # collapse whitespace in the article and the reference summary
+         ctext = ' '.join(str(self.ctext[index]).split())
+         text = ' '.join(str(self.text[index]).split())
+
+         # prompt format: <|startoftext|> article <|summarize|>; the model continues after the separator
+         input_text = '<|startoftext|>' + ctext + '<|summarize|>'
+
+         content = self.tokenizer.encode(input_text, max_length=512, padding='max_length', truncation=True)
+         summary_target_ids = self.tokenizer.encode(text, max_length=512, padding='max_length', truncation=True)
+
+         sample = {'article': torch.tensor(content),
+                   'actual_summary': torch.tensor(summary_target_ids),
+                   'sum_idx': len(self.tokenizer.encode(ctext))}
+         return sample
+
+
+ def gpt_eval(
+     verbose=True,
+     model_name_path=None,
+     src_txt=None,
+     tar_txt=None,
+     gen_path=None,
+     scor_path=None,
+     batch_size=4
+ ):
+     """Generate summaries for src_txt with a fine-tuned GPT-2 checkpoint and write them to gen_path."""
+     predictions = []
+     actuals = []
+
+     # the checkpoint at model_name_path is expected to already contain the special tokens
+     # (i.e. the embeddings were resized when the model was fine-tuned)
+     model = GPT2LMHeadModel.from_pretrained(model_name_path)
+     tokenizer = GPT2Tokenizer.from_pretrained(model_name_path)
+
+     model = model.to(device)
+     model.eval()
+
+     val_params = {
+         'batch_size': batch_size,
+         'shuffle': False,
+         'num_workers': 0
+     }
+
+     # one source dialogue / reference summary per line
+     with open(src_txt, 'r') as sp:
+         src = sp.readlines()
+     with open(tar_txt, 'r') as tp:
+         tar = tp.readlines()
+
+     val_set = GPT21024Dataset(tar, src, tokenizer, 512, 150)
+     val_loader = DataLoader(val_set, **val_params)
+
+     with torch.no_grad():
+         for step, data in enumerate(val_loader, 0):
+             target_output = data['actual_summary'].to(device, dtype=torch.long)
+             input_ids = data['article'].to(device)
+             # mask out <|pad|> positions so beam search ignores them
+             attention_mask = (input_ids != tokenizer.pad_token_id).long()
+
+             if verbose:
+                 print(f'Length of the input context: {len(input_ids[0])}')
+                 print('BEAM SIZE: 4')
+
+             generated_output = model.generate(
+                 input_ids=input_ids,
+                 attention_mask=attention_mask,
+                 max_length=582,
+                 min_length=562,
+                 temperature=1.0,
+                 num_beams=4,
+                 num_return_sequences=1,
+                 pad_token_id=tokenizer.pad_token_id)
+
+             preds = []
+             target = []
+             for g in generated_output:
+                 # keep only the continuation generated after the 512-token prompt
+                 preds.append(tokenizer.decode(g[len(input_ids[0]):], skip_special_tokens=True))
+             for t in target_output:
+                 target.append(tokenizer.decode(t, skip_special_tokens=True))
+             if step % 100 == 0:
+                 print(f'Completed {step}')
+
+             predictions.extend(preds)
+             actuals.extend(target)
+
+     with open(gen_path, 'w') as gp:
+         for pre in predictions:
+             gp.write(pre + "\n")
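
The uploaded script imports argparse but defines no entry point, so gpt_eval has to be invoked from other code. The sketch below shows one hypothetical command-line driver; the flag names, help strings, and any paths or checkpoint directories passed to it are illustrative assumptions, not part of the uploaded file.

# hypothetical driver for evaluate_gpt.py; flag names and defaults are illustrative only
import argparse

from evaluate_gpt import gpt_eval

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate summaries with a fine-tuned GPT-2 checkpoint')
    parser.add_argument('--model_name_path', required=True, help='directory of the fine-tuned GPT-2 checkpoint')
    parser.add_argument('--src_txt', required=True, help='file with one source dialogue per line')
    parser.add_argument('--tar_txt', required=True, help='file with one reference summary per line')
    parser.add_argument('--gen_path', required=True, help='output file for generated summaries')
    parser.add_argument('--scor_path', default=None, help='accepted but unused by gpt_eval')
    parser.add_argument('--batch_size', type=int, default=4)
    args = parser.parse_args()

    gpt_eval(
        model_name_path=args.model_name_path,
        src_txt=args.src_txt,
        tar_txt=args.tar_txt,
        gen_path=args.gen_path,
        scor_path=args.scor_path,
        batch_size=args.batch_size,
    )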