Dataset schema (value ranges observed over the split):
repo             string   length 2 to 99
file             string   length 13 to 225
code             string   length 0 to 18.3M
file_length      int64    0 to 18.3M
avg_line_length  float64  0 to 1.36M
max_line_length  int64    0 to 4.26M
extension_type   string   1 distinct value
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/utils_bert.py
import random import torch import torch.nn as nn from torch.autograd import Variable from torch.optim import Adam, RMSprop from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split import pickle import copy import numpy as np import collections from tqdm import tqdm from more_itertools import collapse from collections import defaultdict def load_data(X, maxlen): input_ids = pad_sequences(X, maxlen=maxlen, dtype="long", truncating="post", padding="post") attention_masks = [] for seq in input_ids: seq_mask = [float(i>0) for i in seq] attention_masks.append(seq_mask) return (input_ids, attention_masks) def f1_score_intents(outputs, labels): P, R, F1, acc = 0, 0, 0, 0 outputs = torch.sigmoid(outputs) for i in range(outputs.shape[0]): TP, FP, FN = 0, 0, 0 for j in range(outputs.shape[1]): if outputs[i][j] > 0.5 and labels[i][j] == 1: TP += 1 elif outputs[i][j] <= 0.5 and labels[i][j] == 1: FN += 1 elif outputs[i][j] > 0.5 and labels[i][j] == 0: FP += 1 precision = TP / float(TP + FP) if (TP + FP) != 0 else 0 recall = TP / float(TP + FN) if (TP + FN) != 0 else 0 F1 += 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0 P += precision R += recall p = (torch.where(outputs[i]>0.5)[0]) r = (torch.where(labels[i]==1)[0]) if len(p) == len(r) and (p == r).all(): acc += 1 P /= outputs.shape[0] R /= outputs.shape[0] F1 /= outputs.shape[0] return P, R, F1, acc ############################################3 def to_spans(l_ids, voc): """Convert a list of BIO labels, coded as integers, into spans identified by a beginning, an end, and a label. To allow easy comparison later, we store them in a dictionary indexed by the start position. @param l_ids: a list of predicted label indices @param voc: label vocabulary dictionary: index to label ex. 0: B-C """ spans = {} current_lbl = None current_start = None for i, l_id in enumerate(l_ids): #l = voc[l_id] l = l_id if l[0] == 'B': # Beginning of a named entity: B-something. if current_lbl: # If we're working on an entity, close it. spans[current_start] = (current_lbl, i) # Create a new entity that starts here. current_lbl = l[2:] current_start = i elif l[0] == 'I': # Continuation of an entity: I-something. if current_lbl: # If we have an open entity, but its label does not # correspond to the predicted I-tag, then we close # the open entity and create a new one. if current_lbl != l[2:]: spans[current_start] = (current_lbl, i) current_lbl = l[2:] current_start = i else: # If we don't have an open entity but predict an I tag, # we create a new entity starting here even though we're # not following the format strictly. current_lbl = l[2:] current_start = i else: # Outside: O. if current_lbl: # If we have an open entity, we close it. spans[current_start] = (current_lbl, i) current_lbl = None current_start = None if current_lbl != None: spans[current_start] = (current_lbl, i+1) return spans def compare(gold, pred, stats, mode='strict'): """Compares two sets of spans and records the results for future aggregation. @param gold: ground truth @param pred: predictions @param stats: the final dictionary with keys of different counts including total and specific labels ex. 
{'total': {'gold': 5, 'pred': 5}, 'Cause': {'gold': 5, 'pred': 5}} """ for start, (lbl, end) in gold.items(): stats['total']['gold'] += 1 stats[lbl]['gold'] += 1 for start, (lbl, end) in pred.items(): stats['total']['pred'] += 1 stats[lbl]['pred'] += 1 if mode == 'strict': for start, (glbl, gend) in gold.items(): if start in pred: plbl, pend = pred[start] if glbl == plbl and gend == pend: stats['total']['corr'] += 1 stats[glbl]['corr'] += 1 def evaluate_iob(predicted, gold, label_field, stats): """This function will evaluate the model from bert dataloader pipeline. """ #gold_cpu = gold.cpu().numpy() #pred_cpu = predicted.cpu().numpy() #gold_cpu = list(gold_cpu.reshape(-1)) #pred_cpu = list(pred_cpu.reshape(-1)) gold_cpu = gold pred_cpu = predicted # pred_cpu = [l for sen in predicted for l in sen] id2label = {v:k for k,v in label_field.items()} # Compute spans for the gold standard and prediction. gold_spans = to_spans(gold_cpu, id2label) pred_spans = to_spans(pred_cpu, id2label) # Finally, update the counts for correct, predicted and gold-standard spans. compare(gold_spans, pred_spans, stats, 'strict') def prf(stats): """ Computes precision, recall and F-score, given a dictionary that contains the counts of correct, predicted and gold-standard items. @params stats: the final statistics """ if stats['pred'] == 0: return 0, 0, 0 p = stats['corr']/stats['pred'] r = stats['corr']/stats['gold'] if p > 0 and r > 0: f = 2*p*r/(p+r) else: f = 0 return p, r, f
5,829
34.120482
115
py
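The span-evaluation helpers in utils_bert.py compose as to_spans -> compare -> prf. A minimal sketch of that pipeline on toy BIO tags, assuming utils_bert.py is importable from the working directory and its heavyweight imports (torch, keras, sklearn) are installed; note that to_spans reads string tags directly (its voc lookup is commented out), so the vocabulary argument can be a dummy:

from collections import Counter, defaultdict
from utils_bert import to_spans, compare, prf

gold = ['B-Cause', 'I-Cause', 'O', 'B-Effect']
pred = ['B-Cause', 'I-Cause', 'O', 'O']

stats = defaultdict(Counter)
compare(to_spans(gold, {}), to_spans(pred, {}), stats, mode='strict')
p, r, f = prf(stats['total'])  # one of two gold spans matched exactly: P = 1.0, R = 0.5, F1 ~ 0.667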
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/train.py
""" @Author : Lee, Qin @StartTime : 2018/08/13 @Filename : train.py @Software : Pycharm @Framework : Pytorch @LastModify : 2019/05/07 """ from utils.module import ModelManager from utils.loader import DatasetManager ###### from utils.process import Processor ##### import torch import os import json import random import argparse import numpy as np parser = argparse.ArgumentParser() # Training parameters. parser.add_argument('--data_dir', '-dd', type=str, default='data_with_slots/e2e') parser.add_argument('--save_dir', '-sd', type=str, default='save/e2e') parser.add_argument("--random_state", '-rs', type=int, default=0) parser.add_argument('--gpu', '-g', action='store_true', help='use gpu', required=False, default=False) parser.add_argument('--num_epoch', '-ne', type=int, default=20) parser.add_argument('--batch_size', '-bs', type=int, default=16) parser.add_argument('--l2_penalty', '-lp', type=float, default=1e-6) parser.add_argument("--learning_rate", '-lr', type=float, default=0.001) parser.add_argument('--dropout_rate', '-dr', type=float, default=0.4) parser.add_argument('--intent_forcing_rate', '-ifr', type=float, default=0.9) parser.add_argument("--differentiable", "-d", action="store_true", default=False) parser.add_argument('--slot_forcing_rate', '-sfr', type=float, default=0.9) # model parameters. parser.add_argument('--word_embedding_dim', '-wed', type=int, default=64) parser.add_argument('--encoder_hidden_dim', '-ehd', type=int, default=256) parser.add_argument('--intent_embedding_dim', '-ied', type=int, default=8) parser.add_argument('--slot_embedding_dim', '-sed', type=int, default=32) parser.add_argument('--slot_decoder_hidden_dim', '-sdhd', type=int, default=64) parser.add_argument('--intent_decoder_hidden_dim', '-idhd', type=int, default=64) parser.add_argument('--attention_hidden_dim', '-ahd', type=int, default=1024) parser.add_argument('--attention_output_dim', '-aod', type=int, default=128) if __name__ == "__main__": args = parser.parse_args() #args.gpu = args.gpu and torch.cuda.is_available() # Save training and model parameters. if not os.path.exists(args.save_dir): os.system("mkdir -p " + args.save_dir) log_path = os.path.join(args.save_dir, "param.json") with open(log_path, "w") as fw: fw.write(json.dumps(args.__dict__, indent=True)) # Fix the random seed of package random. random.seed(args.random_state) np.random.seed(args.random_state) # Fix the random seed of Pytorch when using GPU. if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.random_state) torch.cuda.manual_seed(args.random_state) # Fix the random seed of Pytorch when using CPU. torch.manual_seed(args.random_state) torch.random.manual_seed(args.random_state) # Instantiate a dataset object. dataset = DatasetManager(args) dataset.quick_build() dataset.show_summary() # Instantiate a network model object. model = ModelManager( args, len(dataset.word_alphabet), len(dataset.slot_alphabet), len(dataset.intent_alphabet)) model.show_summary() # To train and evaluate the models. process = Processor(dataset, model, args.batch_size) process.train() print('\nAccepted performance slot_f1, intent_f1, intent_acc, sent_acc: ' + str(Processor.validate( os.path.join(args.save_dir, "model/model.pkl"), os.path.join(args.save_dir, "model/dataset.pkl"), args.batch_size)) + " at test dataset;\n")
3,585
36.747368
103
py
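train.py is driven entirely by the argparse flags defined above. A hedged sketch of launching it programmatically from the baseline_stackprop directory; the data and save paths simply reuse the script's own defaults and the epoch count is arbitrary:

import subprocess

subprocess.run(
    ['python', 'train.py',
     '--data_dir', 'data_with_slots/e2e',
     '--save_dir', 'save/e2e',
     '--num_epoch', '20',
     '--batch_size', '16'],
    check=True,  # raise if the training run exits with an error
)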
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/utils/module.py
""" @Author : Lee, Qin @StartTime : 2018/08/13 @Filename : module.py @Software : Pycharm @Framework : Pytorch @LastModify : 2019/05/07 """ import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence from torch.nn.utils.rnn import pad_packed_sequence class ModelManager(nn.Module): def __init__(self, args, num_word, num_slot, num_intent): super(ModelManager, self).__init__() self.__num_word = num_word self.__num_slot = num_slot self.__num_intent = num_intent self.__args = args # Initialize an embedding object. self.__embedding = EmbeddingCollection( self.__num_word, self.__args.word_embedding_dim ) # Initialize an LSTM Encoder object. self.__encoder = LSTMEncoder( self.__args.word_embedding_dim, self.__args.encoder_hidden_dim, self.__args.dropout_rate ) # Initialize an self-attention layer. self.__attention = SelfAttention( self.__args.word_embedding_dim, self.__args.attention_hidden_dim, self.__args.attention_output_dim, self.__args.dropout_rate ) # Initialize an Decoder object for intent. self.__intent_decoder = LSTMDecoder( self.__args.encoder_hidden_dim + self.__args.attention_output_dim, self.__args.intent_decoder_hidden_dim, self.__num_intent, self.__args.dropout_rate, embedding_dim=self.__args.intent_embedding_dim ) # Initialize an Decoder object for slot. self.__slot_decoder = LSTMDecoder( self.__args.encoder_hidden_dim + self.__args.attention_output_dim, self.__args.slot_decoder_hidden_dim, self.__num_slot, self.__args.dropout_rate, embedding_dim=self.__args.slot_embedding_dim, extra_dim=self.__num_intent ) # One-hot encoding for augment data feed. self.__intent_embedding = nn.Embedding( self.__num_intent, self.__num_intent ) self.__intent_embedding.weight.data = torch.eye(self.__num_intent) self.__intent_embedding.weight.requires_grad = False def show_summary(self): """ print the abstract of the defined model. """ print('Model parameters are listed as follows:\n') print('\tnumber of word: {};'.format(self.__num_word)) print('\tnumber of slot: {};'.format(self.__num_slot)) print('\tnumber of intent: {};'.format(self.__num_intent)) print('\tword embedding dimension: {};'.format(self.__args.word_embedding_dim)) print('\tencoder hidden dimension: {};'.format(self.__args.encoder_hidden_dim)) print('\tdimension of intent embedding: {};'.format(self.__args.intent_embedding_dim)) print('\tdimension of slot embedding: {};'.format(self.__args.slot_embedding_dim)) print('\tdimension of slot decoder hidden: {};'.format(self.__args.slot_decoder_hidden_dim)) print('\tdimension of intent decoder hidden: {};'.format(self.__args.intent_decoder_hidden_dim)) print('\thidden dimension of self-attention: {};'.format(self.__args.attention_hidden_dim)) print('\toutput dimension of self-attention: {};'.format(self.__args.attention_output_dim)) print('\nEnd of parameters show. 
Now training begins.\n\n') def forward(self, text, seq_lens, n_predicts=None, forced_slot=None, forced_intent=None): word_tensor, _ = self.__embedding(text) lstm_hiddens = self.__encoder(word_tensor, seq_lens) # transformer_hiddens = self.__transformer(pos_tensor, seq_lens) attention_hiddens = self.__attention(word_tensor, seq_lens) hiddens = torch.cat([attention_hiddens, lstm_hiddens], dim=1) pred_intent = self.__intent_decoder( hiddens, seq_lens, forced_input=forced_intent ) if not self.__args.differentiable: _, idx_intent = pred_intent.topk(1, dim=-1) feed_intent = self.__intent_embedding(idx_intent.squeeze(1)) else: feed_intent = pred_intent pred_slot = self.__slot_decoder( hiddens, seq_lens, forced_input=forced_slot, extra_input=feed_intent ###### ) if n_predicts is None: return F.log_softmax(pred_slot, dim=1), F.log_softmax(pred_intent, dim=1) else: _, slot_index = pred_slot.topk(n_predicts, dim=1) _, intent_index = pred_intent.topk(n_predicts, dim=1) return slot_index.cpu().data.numpy().tolist(), intent_index.cpu().data.numpy().tolist() def golden_intent_predict_slot(self, text, seq_lens, golden_intent, n_predicts=1): word_tensor, _ = self.__embedding(text) embed_intent = self.__intent_embedding(golden_intent) lstm_hiddens = self.__encoder(word_tensor, seq_lens) attention_hiddens = self.__attention(word_tensor, seq_lens) hiddens = torch.cat([attention_hiddens, lstm_hiddens], dim=1) pred_slot = self.__slot_decoder( hiddens, seq_lens, extra_input=embed_intent ) _, slot_index = pred_slot.topk(n_predicts, dim=-1) # Just predict single slot value. return slot_index.cpu().data.numpy().tolist() class EmbeddingCollection(nn.Module): """ Provide word vector and position vector encoding. """ def __init__(self, input_dim, embedding_dim, max_len=5000): super(EmbeddingCollection, self).__init__() self.__input_dim = input_dim # Here embedding_dim must be an even embedding. self.__embedding_dim = embedding_dim self.__max_len = max_len # Word vector encoder. self.__embedding_layer = nn.Embedding( self.__input_dim, self.__embedding_dim ) # Position vector encoder. # self.__position_layer = torch.zeros(self.__max_len, self.__embedding_dim) # position = torch.arange(0, self.__max_len).unsqueeze(1) # div_term = torch.exp(torch.arange(0, self.__embedding_dim, 2) * # (-math.log(10000.0) / self.__embedding_dim)) # Sine wave curve design. # self.__position_layer[:, 0::2] = torch.sin(position * div_term) # self.__position_layer[:, 1::2] = torch.cos(position * div_term) # # self.__position_layer = self.__position_layer.unsqueeze(0) # self.register_buffer('pe', self.__position_layer) def forward(self, input_x): # Get word vector encoding. embedding_x = self.__embedding_layer(input_x) # Get position encoding. # position_x = Variable(self.pe[:, :input_x.size(1)], requires_grad=False) # Board-casting principle. return embedding_x, embedding_x class LSTMEncoder(nn.Module): """ Encoder structure based on bidirectional LSTM. """ def __init__(self, embedding_dim, hidden_dim, dropout_rate): super(LSTMEncoder, self).__init__() # Parameter recording. self.__embedding_dim = embedding_dim self.__hidden_dim = hidden_dim // 2 self.__dropout_rate = dropout_rate # Network attributes. self.__dropout_layer = nn.Dropout(self.__dropout_rate) self.__lstm_layer = nn.LSTM( input_size=self.__embedding_dim, hidden_size=self.__hidden_dim, batch_first=True, bidirectional=True, dropout=self.__dropout_rate, num_layers=1 ) def forward(self, embedded_text, seq_lens): """ Forward process for LSTM Encoder. 
(batch_size, max_sent_len) -> (batch_size, max_sent_len, word_dim) -> (batch_size, max_sent_len, hidden_dim) -> (total_word_num, hidden_dim) :param embedded_text: padded and embedded input text. :param seq_lens: is the length of original input text. :return: is encoded word hidden vectors. """ # Padded_text should be instance of LongTensor. dropout_text = self.__dropout_layer(embedded_text) # Pack and Pad process for input of variable length. packed_text = pack_padded_sequence(dropout_text, seq_lens, batch_first=True) lstm_hiddens, (h_last, c_last) = self.__lstm_layer(packed_text) padded_hiddens, _ = pad_packed_sequence(lstm_hiddens, batch_first=True) return torch.cat([padded_hiddens[i][:seq_lens[i], :] for i in range(0, len(seq_lens))], dim=0) class LSTMDecoder(nn.Module): """ Decoder structure based on unidirectional LSTM. """ def __init__(self, input_dim, hidden_dim, output_dim, dropout_rate, embedding_dim=None, extra_dim=None): """ Construction function for Decoder. :param input_dim: input dimension of Decoder. In fact, it's encoder hidden size. :param hidden_dim: hidden dimension of iterative LSTM. :param output_dim: output dimension of Decoder. In fact, it's total number of intent or slot. :param dropout_rate: dropout rate of network which is only useful for embedding. :param embedding_dim: if it's not None, the input and output are relevant. :param extra_dim: if it's not None, the decoder receives information tensors. """ super(LSTMDecoder, self).__init__() self.__input_dim = input_dim self.__hidden_dim = hidden_dim self.__output_dim = output_dim self.__dropout_rate = dropout_rate self.__embedding_dim = embedding_dim self.__extra_dim = extra_dim # If embedding_dim is not None, the output and input # of this structure is relevant. if self.__embedding_dim is not None: self.__embedding_layer = nn.Embedding(output_dim, embedding_dim) self.__init_tensor = nn.Parameter( torch.randn(1, self.__embedding_dim), requires_grad=True ) # Make sure the input dimension of iterative LSTM. if self.__extra_dim is not None and self.__embedding_dim is not None: lstm_input_dim = self.__input_dim + self.__extra_dim + self.__embedding_dim elif self.__extra_dim is not None: lstm_input_dim = self.__input_dim + self.__extra_dim elif self.__embedding_dim is not None: lstm_input_dim = self.__input_dim + self.__embedding_dim else: lstm_input_dim = self.__input_dim # Network parameter definition. self.__dropout_layer = nn.Dropout(self.__dropout_rate) self.__lstm_layer = nn.LSTM( input_size=lstm_input_dim, hidden_size=self.__hidden_dim, batch_first=True, bidirectional=False, dropout=self.__dropout_rate, num_layers=1 ) self.__linear_layer = nn.Linear( self.__hidden_dim, self.__output_dim ) def forward(self, encoded_hiddens, seq_lens, forced_input=None, extra_input=None): """ Forward process for decoder. :param encoded_hiddens: is encoded hidden tensors produced by encoder. :param seq_lens: is a list containing lengths of sentence. :param forced_input: is truth values of label, provided by teacher forcing. :param extra_input: comes from another decoder as information tensor. :return: is distribution of prediction labels. """ # Concatenate information tensor if possible. if extra_input is not None: input_tensor = torch.cat([encoded_hiddens, extra_input], dim=1) else: input_tensor = encoded_hiddens output_tensor_list, sent_start_pos = [], 0 if self.__embedding_dim is None or forced_input is not None: for sent_i in range(0, len(seq_lens)): sent_end_pos = sent_start_pos + seq_lens[sent_i] # Segment input hidden tensors. 
seg_hiddens = input_tensor[sent_start_pos: sent_end_pos, :] if self.__embedding_dim is not None and forced_input is not None: if seq_lens[sent_i] > 1: seg_forced_input = forced_input[sent_start_pos: sent_end_pos] seg_forced_tensor = self.__embedding_layer(seg_forced_input).view(seq_lens[sent_i], -1) seg_prev_tensor = torch.cat([self.__init_tensor, seg_forced_tensor[:-1, :]], dim=0) else: seg_prev_tensor = self.__init_tensor # Concatenate forced target tensor. combined_input = torch.cat([seg_hiddens, seg_prev_tensor], dim=1) else: combined_input = seg_hiddens dropout_input = self.__dropout_layer(combined_input) lstm_out, _ = self.__lstm_layer(dropout_input.view(1, seq_lens[sent_i], -1)) linear_out = self.__linear_layer(lstm_out.view(seq_lens[sent_i], -1)) output_tensor_list.append(linear_out) sent_start_pos = sent_end_pos else: for sent_i in range(0, len(seq_lens)): prev_tensor = self.__init_tensor # It's necessary to remember h and c state # when output prediction every single step. last_h, last_c = None, None sent_end_pos = sent_start_pos + seq_lens[sent_i] for word_i in range(sent_start_pos, sent_end_pos): seg_input = input_tensor[[word_i], :] combined_input = torch.cat([seg_input, prev_tensor], dim=1) dropout_input = self.__dropout_layer(combined_input).view(1, 1, -1) if last_h is None and last_c is None: lstm_out, (last_h, last_c) = self.__lstm_layer(dropout_input) else: lstm_out, (last_h, last_c) = self.__lstm_layer(dropout_input, (last_h, last_c)) lstm_out = self.__linear_layer(lstm_out.view(1, -1)) output_tensor_list.append(lstm_out) _, index = lstm_out.topk(1, dim=1) prev_tensor = self.__embedding_layer(index).view(1, -1) sent_start_pos = sent_end_pos return torch.cat(output_tensor_list, dim=0) class QKVAttention(nn.Module): """ Attention mechanism based on Query-Key-Value architecture. And especially, when query == key == value, it's self-attention. """ def __init__(self, query_dim, key_dim, value_dim, hidden_dim, output_dim, dropout_rate): super(QKVAttention, self).__init__() # Record hyper-parameters. self.__query_dim = query_dim self.__key_dim = key_dim self.__value_dim = value_dim self.__hidden_dim = hidden_dim self.__output_dim = output_dim self.__dropout_rate = dropout_rate # Declare network structures. self.__query_layer = nn.Linear(self.__query_dim, self.__hidden_dim) self.__key_layer = nn.Linear(self.__key_dim, self.__hidden_dim) self.__value_layer = nn.Linear(self.__value_dim, self.__output_dim) self.__dropout_layer = nn.Dropout(p=self.__dropout_rate) def forward(self, input_query, input_key, input_value): """ The forward propagation of attention. Here we require the first dimension of input key and value are equal. :param input_query: is query tensor, (n, d_q) :param input_key: is key tensor, (m, d_k) :param input_value: is value tensor, (m, d_v) :return: attention based tensor, (n, d_h) """ # Linear transform to fine-tune dimension. linear_query = self.__query_layer(input_query) linear_key = self.__key_layer(input_key) linear_value = self.__value_layer(input_value) score_tensor = F.softmax(torch.matmul( linear_query, linear_key.transpose(-2, -1) ), dim=-1) / math.sqrt(self.__hidden_dim) forced_tensor = torch.matmul(score_tensor, linear_value) forced_tensor = self.__dropout_layer(forced_tensor) return forced_tensor class SelfAttention(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, dropout_rate): super(SelfAttention, self).__init__() # Record parameters. 
self.__input_dim = input_dim self.__hidden_dim = hidden_dim self.__output_dim = output_dim self.__dropout_rate = dropout_rate # Record network parameters. self.__dropout_layer = nn.Dropout(self.__dropout_rate) self.__attention_layer = QKVAttention( self.__input_dim, self.__input_dim, self.__input_dim, self.__hidden_dim, self.__output_dim, self.__dropout_rate ) def forward(self, input_x, seq_lens): dropout_x = self.__dropout_layer(input_x) attention_x = self.__attention_layer( dropout_x, dropout_x, dropout_x ) flat_x = torch.cat( [attention_x[i][:seq_lens[i], :] for i in range(0, len(seq_lens))], dim=0 ) return flat_x
17,569
38.483146
111
py
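QKVAttention in module.py is a single-head attention layer; when query, key, and value coincide it reduces to the self-attention wrapped by SelfAttention. A shape-level sketch, assuming the module is importable as utils.module:

import torch
from utils.module import QKVAttention

att = QKVAttention(query_dim=64, key_dim=64, value_dim=64,
                   hidden_dim=128, output_dim=32, dropout_rate=0.1)
q = torch.randn(10, 64)   # n = 10 query positions
kv = torch.randn(7, 64)   # m = 7 key/value positions
out = att(q, kv, kv)      # -> (10, 32): one value mixture per query

Note that this implementation divides by sqrt(hidden_dim) after the softmax rather than scaling the logits before it, as standard scaled dot-product attention does.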
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/utils/miulab.py
""" Copy file (including metric) from MiuLab: https://github.com/MiuLab/SlotGated-SLU """ # compute f1 score is modified from conlleval.pl def __startOfChunk(prevTag, tag, prevTagType, tagType, chunkStart=False): if prevTag == 'B' and tag == 'B': chunkStart = True if prevTag == 'I' and tag == 'B': chunkStart = True if prevTag == 'O' and tag == 'B': chunkStart = True if prevTag == 'O' and tag == 'I': chunkStart = True if prevTag == 'E' and tag == 'E': chunkStart = True if prevTag == 'E' and tag == 'I': chunkStart = True if prevTag == 'O' and tag == 'E': chunkStart = True if prevTag == 'O' and tag == 'I': chunkStart = True if tag != 'O' and tag != '.' and prevTagType != tagType: chunkStart = True return chunkStart def __endOfChunk(prevTag, tag, prevTagType, tagType, chunkEnd=False): if prevTag == 'B' and tag == 'B': chunkEnd = True if prevTag == 'B' and tag == 'O': chunkEnd = True if prevTag == 'I' and tag == 'B': chunkEnd = True if prevTag == 'I' and tag == 'O': chunkEnd = True if prevTag == 'E' and tag == 'E': chunkEnd = True if prevTag == 'E' and tag == 'I': chunkEnd = True if prevTag == 'E' and tag == 'O': chunkEnd = True if prevTag == 'I' and tag == 'O': chunkEnd = True if prevTag != 'O' and prevTag != '.' and prevTagType != tagType: chunkEnd = True return chunkEnd def __splitTagType(tag): s = tag.split('-') if len(s) > 2 or len(s) == 0: raise ValueError('tag format wrong. it must be B-xxx.xxx') if len(s) == 1: tag = s[0] tagType = "" else: tag = s[0] tagType = s[1] return tag, tagType def computeF1Score(correct_slots, pred_slots): correctChunk = {} correctChunkCnt = 0.0 foundCorrect = {} foundCorrectCnt = 0.0 foundPred = {} foundPredCnt = 0.0 correctTags = 0.0 tokenCount = 0.0 for correct_slot, pred_slot in zip(correct_slots, pred_slots): inCorrect = False lastCorrectTag = 'O' lastCorrectType = '' lastPredTag = 'O' lastPredType = '' for c, p in zip(correct_slot, pred_slot): correctTag, correctType = __splitTagType(c) predTag, predType = __splitTagType(p) if inCorrect == True: if __endOfChunk(lastCorrectTag, correctTag, lastCorrectType, correctType) == True and \ __endOfChunk(lastPredTag, predTag, lastPredType, predType) == True and \ (lastCorrectType == lastPredType): inCorrect = False correctChunkCnt += 1.0 if lastCorrectType in correctChunk: correctChunk[lastCorrectType] += 1.0 else: correctChunk[lastCorrectType] = 1.0 elif __endOfChunk(lastCorrectTag, correctTag, lastCorrectType, correctType) != \ __endOfChunk(lastPredTag, predTag, lastPredType, predType) or \ (correctType != predType): inCorrect = False if __startOfChunk(lastCorrectTag, correctTag, lastCorrectType, correctType) == True and \ __startOfChunk(lastPredTag, predTag, lastPredType, predType) == True and \ (correctType == predType): inCorrect = True if __startOfChunk(lastCorrectTag, correctTag, lastCorrectType, correctType) == True: foundCorrectCnt += 1 if correctType in foundCorrect: foundCorrect[correctType] += 1.0 else: foundCorrect[correctType] = 1.0 if __startOfChunk(lastPredTag, predTag, lastPredType, predType) == True: foundPredCnt += 1.0 if predType in foundPred: foundPred[predType] += 1.0 else: foundPred[predType] = 1.0 if correctTag == predTag and correctType == predType: correctTags += 1.0 tokenCount += 1.0 lastCorrectTag = correctTag lastCorrectType = correctType lastPredTag = predTag lastPredType = predType if inCorrect == True: correctChunkCnt += 1.0 if lastCorrectType in correctChunk: correctChunk[lastCorrectType] += 1.0 else: correctChunk[lastCorrectType] = 1.0 if foundPredCnt > 0: precision = 1.0 * 
correctChunkCnt / foundPredCnt else: precision = 0 if foundCorrectCnt > 0: recall = 1.0 * correctChunkCnt / foundCorrectCnt else: recall = 0 if (precision + recall) > 0: f1 = (2.0 * precision * recall) / (precision + recall) else: f1 = 0 return f1, precision, recall
4,131
25.487179
92
py
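computeF1Score in miulab.py expects parallel lists of gold and predicted BIO tag sequences and returns chunk-level scores. A toy example, with the import path assumed from the package layout:

from utils.miulab import computeF1Score

gold = [['B-city', 'I-city', 'O', 'B-date']]
pred = [['B-city', 'I-city', 'O', 'O']]
f1, precision, recall = computeF1Score(gold, pred)
# one of two gold chunks recovered exactly: precision = 1.0, recall = 0.5, f1 ~ 0.667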
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/utils/__init__.py
__author__ = "Lie Pleased"
26
26
26
py
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/utils/process.py
""" @Author : Lee, Qin @StartTime : 2018/08/13 @Filename : process.py @Software : Pycharm @Framework : Pytorch @LastModify : 2019/05/07 """ import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import pickle import os import time import random import numpy as np from tqdm import tqdm from collections import Counter, defaultdict from sklearn.metrics import f1_score from sklearn.preprocessing import MultiLabelBinarizer # Utils functions copied from Slot-gated model, origin url: # https://github.com/MiuLab/SlotGated-SLU/blob/master/utils.py from utils import miulab from utils_bert import evaluate_iob, prf def multilabel2one_hot(labels, nums): res = [0.] * nums if len(labels) == 0: return res if isinstance(labels[0], list): for label in labels[0]: res[label] = 1. return res for label in labels: res[label] = 1. return res def instance2onehot(func, num_intent, data): res = [] for intents in func(data): #res.append(multilabel2one_hot(intents, num_intent)) res.append(intents) return np.array(res) class Processor(object): def __init__(self, dataset, model, batch_size): self.__dataset = dataset self.__model = model self.__batch_size = batch_size if torch.cuda.is_available(): time_start = time.time() self.__model = self.__model.cuda() time_con = time.time() - time_start print("The model has been loaded into GPU and cost {:.6f} seconds.\n".format(time_con)) self.__criterion = nn.NLLLoss() self.__optimizer = optim.Adam( self.__model.parameters(), lr=self.__dataset.learning_rate, weight_decay=self.__dataset.l2_penalty ) with open("data_with_slots/e2e/slot2id.pkl", 'rb') as f: self.slot_dic = pickle.load(f) def train(self): best_dev_slot = 0.0 best_dev_intent = 0.0 best_dev_sent = 0.0 dataloader = self.__dataset.batch_delivery('train') for epoch in range(0, self.__dataset.num_epoch): total_slot_loss, total_intent_loss = 0.0, 0.0 time_start = time.time() self.__model.train() for text_batch, slot_batch, intent_batch in tqdm(dataloader, ncols=50): padded_text, [sorted_slot, sorted_intent], seq_lens, _ = self.__dataset.add_padding( text_batch, [(slot_batch, False), (intent_batch, False)] ) sorted_intent = [item * num for item, num in zip(sorted_intent, seq_lens)] sorted_intent = list(Evaluator.expand_list(sorted_intent)) text_var = Variable(torch.LongTensor(padded_text)) slot_var = Variable(torch.LongTensor(list(Evaluator.expand_list(sorted_slot)))) intent_var = Variable(torch.LongTensor(sorted_intent)) if torch.cuda.is_available(): text_var = text_var.cuda() slot_var = slot_var.cuda() intent_var = intent_var.cuda() random_slot, random_intent = random.random(), random.random() if random_slot < self.__dataset.slot_forcing_rate and \ random_intent < self.__dataset.intent_forcing_rate: slot_out, intent_out = self.__model( text_var, seq_lens, forced_slot=slot_var, forced_intent=intent_var ) elif random_slot < self.__dataset.slot_forcing_rate: slot_out, intent_out = self.__model( text_var, seq_lens, forced_slot=slot_var ) elif random_intent < self.__dataset.intent_forcing_rate: slot_out, intent_out = self.__model( text_var, seq_lens, forced_intent=intent_var ) else: slot_out, intent_out = self.__model(text_var, seq_lens) slot_loss = self.__criterion(slot_out, slot_var) intent_loss = self.__criterion(intent_out, intent_var) batch_loss = slot_loss + intent_loss self.__optimizer.zero_grad() batch_loss.backward() self.__optimizer.step() try: total_slot_loss += slot_loss.cpu().item() total_intent_loss += intent_loss.cpu().item() except AttributeError: total_slot_loss += 
slot_loss.cpu().data.numpy()[0] total_intent_loss += intent_loss.cpu().data.numpy()[0] time_con = time.time() - time_start print('[Epoch {:2d}]: The total slot loss on train data is {:2.6f}, intent data is {:2.6f}, cost ' \ 'about {:2.6} seconds.'.format(epoch, total_slot_loss, total_intent_loss, time_con)) change, time_start = False, time.time() dev_f1_score, dev_intent_f1,dev_acc, dev_sent_acc = self.estimate(if_dev=True, test_batch=self.__batch_size) if dev_f1_score > best_dev_slot or dev_acc > best_dev_intent or dev_sent_acc > best_dev_sent: test_f1, test_intent_f1,test_acc, test_sent_acc = self.estimate(if_dev=False, test_batch=self.__batch_size) if dev_f1_score > best_dev_slot: best_dev_slot = dev_f1_score if dev_acc > best_dev_intent: best_dev_intent = dev_acc if dev_sent_acc > best_dev_sent: best_dev_sent = dev_sent_acc print('\nTest result: slot f1 score: {:.6f}, intent f1 score: {:.6f},intent acc score: {:.6f}, semantic ' 'accuracy score: {:.6f}.'.format(test_f1, test_intent_f1,test_acc, test_sent_acc)) model_save_dir = os.path.join(self.__dataset.save_dir, "model") if not os.path.exists(model_save_dir): os.mkdir(model_save_dir) torch.save(self.__model, os.path.join(model_save_dir, "model.pkl")) torch.save(self.__dataset, os.path.join(model_save_dir, 'dataset.pkl')) time_con = time.time() - time_start print('[Epoch {:2d}]: In validation process, the slot f1 score is {:2.6f}, ' \ 'the intent f1 is {:2.6f}, the intent acc is {:2.6f}, the semantic acc is {:.2f}, cost about ' \ '{:2.6f} seconds.\n'.format(epoch, dev_f1_score, dev_intent_f1,dev_acc, dev_sent_acc, time_con)) def estimate(self, if_dev, test_batch=100): """ Estimate the performance of model on dev or test dataset. """ if if_dev: pred_slot, real_slot, pred_intent, real_intent, _ = self.prediction( self.__model, self.__dataset, "dev", test_batch ) else: pred_slot, real_slot, pred_intent, real_intent, _ = self.prediction( self.__model, self.__dataset, "test", test_batch ) # evaluate IOB stats = defaultdict(Counter) for pred, real in zip(pred_slot, real_slot): evaluate_iob(pred, real, self.slot_dic, stats) # print slot stats p_slot, r_slot, f1_slot = prf(stats['total']) print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') #slot_f1_socre = f1_score(bi_pred_slot, bi_real_slot, average='micro') intent_f1_socre = f1_score(pred_intent, real_intent, average='micro') intent_acc = Evaluator.accuracy(pred_intent, real_intent) sent_acc = Evaluator.semantic_acc(pred_slot, real_slot, pred_intent, real_intent) return f1_slot,intent_f1_socre, intent_acc, sent_acc ####### @staticmethod def validate(model_path, dataset_path, batch_size): """ validation will write mistaken samples to files and make scores. """ model = torch.load(model_path) dataset = torch.load(dataset_path) # Get the sentence list in test dataset. sent_list = dataset.test_sentence pred_slot, real_slot, exp_pred_intent, real_intent, pred_intent = Processor.prediction( model, dataset, "test", batch_size ) # To make sure the directory for save error prediction. mistake_dir = os.path.join(dataset.save_dir, "error") if not os.path.exists(mistake_dir): os.mkdir(mistake_dir) slot_file_path = os.path.join(mistake_dir, "slot.txt") intent_file_path = os.path.join(mistake_dir, "intent.txt") both_file_path = os.path.join(mistake_dir, "both.txt") # Write those sample with mistaken slot prediction. 
with open(slot_file_path, 'w') as fw: for w_list, r_slot_list, p_slot_list in zip(sent_list, real_slot, pred_slot): if r_slot_list != p_slot_list: for w, r, p in zip(w_list, r_slot_list, p_slot_list): fw.write(w + '\t' + r + '\t' + p + '\n') fw.write('\n') # Write those sample with mistaken intent prediction. with open(intent_file_path, 'w') as fw: for w_list, p_intent_list, r_intent, p_intent in zip(sent_list, pred_intent, real_intent, exp_pred_intent): if p_intent != r_intent: for w, p in zip(w_list, p_intent_list): fw.write(w + '\t' + p + '\n') fw.write(r_intent + '\t' + p_intent + '\n\n') # Write those sample both have intent and slot errors. with open(both_file_path, 'w') as fw: for w_list, r_slot_list, p_slot_list, p_intent_list, r_intent, p_intent in \ zip(sent_list, real_slot, pred_slot, pred_intent, real_intent, exp_pred_intent): if r_slot_list != p_slot_list or r_intent != p_intent: for w, r_slot, p_slot, p_intent_ in zip(w_list, r_slot_list, p_slot_list, p_intent_list): fw.write(w + '\t' + r_slot + '\t' + p_slot + '\t' + p_intent_ + '\n') fw.write(r_intent + '\t' + p_intent + '\n\n') # evaluate IOB with open("data_with_slots/e2e/slot2id.pkl", 'rb') as f: slot_dic = pickle.load(f) stats = defaultdict(Counter) for pred, real in zip(pred_slot, real_slot): evaluate_iob(pred, real, slot_dic, stats) # print slot stats p_slot, r_slot, f1_slot = prf(stats['total']) print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}') slot_f1 = miulab.computeF1Score(pred_slot, real_slot)[0] intent_f1 = f1_score(pred_intent, real_intent, average='micro') intent_acc = Evaluator.accuracy(exp_pred_intent, real_intent) sent_acc = Evaluator.semantic_acc(pred_slot, real_slot, exp_pred_intent, real_intent) return slot_f1, intent_f1, intent_acc, sent_acc @staticmethod def prediction(model, dataset, mode, batch_size): model.eval() if mode == "dev": dataloader = dataset.batch_delivery('dev', batch_size=batch_size, shuffle=False, is_digital=False) elif mode == "test": dataloader = dataset.batch_delivery('test', batch_size=batch_size, shuffle=False, is_digital=False) else: raise Exception("Argument error! 
mode belongs to {\"dev\", \"test\"}.") pred_slot, real_slot = [], [] pred_intent, real_intent = [], [] for text_batch, slot_batch, intent_batch in tqdm(dataloader, ncols=50): padded_text, [sorted_slot, sorted_intent], seq_lens, sorted_index = dataset.add_padding( text_batch, [(slot_batch, False), (intent_batch, False)], digital=False ) # Because it's a visualization bug, in valid time, it doesn't matter # Only in test time will it need to restore if mode == 'test': tmp_r_slot = [[] for _ in range(len(sorted_index))] for i in range(len(sorted_index)): tmp_r_slot[sorted_index[i]] = sorted_slot[i] sorted_slot = tmp_r_slot tmp_intent = [[] for _ in range(len(sorted_index))] for i in range(len(sorted_index)): tmp_intent[sorted_index[i]] = sorted_intent[i] sorted_intent = tmp_intent real_slot.extend(sorted_slot) real_intent.extend(list(Evaluator.expand_list(sorted_intent))) digit_text = dataset.word_alphabet.get_index(padded_text) var_text = Variable(torch.LongTensor(digit_text)) if torch.cuda.is_available(): var_text = var_text.cuda() slot_idx, intent_idx = model(var_text, seq_lens, n_predicts=1) nested_slot = Evaluator.nested_list([list(Evaluator.expand_list(slot_idx))], seq_lens)[0] if mode == 'test': tmp_r_slot = [[] for _ in range(len(sorted_index))] for i in range(len(sorted_index)): tmp_r_slot[sorted_index[i]] = nested_slot[i] nested_slot = tmp_r_slot pred_slot.extend(dataset.slot_alphabet.get_instance(nested_slot)) nested_intent = Evaluator.nested_list([list(Evaluator.expand_list(intent_idx))], seq_lens)[0] if mode == 'test': tmp_intent = [[] for _ in range(len(sorted_index))] for i in range(len(sorted_index)): tmp_intent[sorted_index[i]] = nested_intent[i] nested_intent = tmp_intent pred_intent.extend(dataset.intent_alphabet.get_instance(nested_intent)) exp_pred_intent = Evaluator.max_freq_predict(pred_intent) return pred_slot, real_slot, exp_pred_intent, real_intent, pred_intent class Evaluator(object): @staticmethod def semantic_acc(pred_slot, real_slot, pred_intent, real_intent): """ Compute the accuracy based on the whole predictions of given sentence, including slot and intent. """ total_count, correct_count = 0.0, 0.0 for p_slot, r_slot, p_intent, r_intent in zip(pred_slot, real_slot, pred_intent, real_intent): if p_slot == r_slot and p_intent == r_intent: correct_count += 1.0 total_count += 1.0 return 1.0 * correct_count / total_count @staticmethod def accuracy(pred_list, real_list): """ Get accuracy measured by predictions and ground-trues. """ pred_array = np.array(list(Evaluator.expand_list(pred_list))) real_array = np.array(list(Evaluator.expand_list(real_list))) return (pred_array == real_array).sum() * 1.0 / len(pred_array) @staticmethod def f1_score(pred_list, real_list): """ Get F1 score measured by predictions and ground-trues. 
""" tp, fp, fn = 0.0, 0.0, 0.0 for i in range(len(pred_list)): seg = set() result = [elem.strip() for elem in pred_list[i]] target = [elem.strip() for elem in real_list[i]] j = 0 while j < len(target): cur = target[j] if cur[0] == 'B': k = j + 1 while k < len(target): str_ = target[k] if not (str_[0] == 'I' and cur[1:] == str_[1:]): break k = k + 1 seg.add((cur, j, k - 1)) j = k - 1 j = j + 1 tp_ = 0 j = 0 while j < len(result): cur = result[j] if cur[0] == 'B': k = j + 1 while k < len(result): str_ = result[k] if not (str_[0] == 'I' and cur[1:] == str_[1:]): break k = k + 1 if (cur, j, k - 1) in seg: tp_ += 1 else: fp += 1 j = k - 1 j = j + 1 fn += len(seg) - tp_ tp += tp_ p = tp / (tp + fp) if tp + fp != 0 else 0 r = tp / (tp + fn) if tp + fn != 0 else 0 return 2 * p * r / (p + r) if p + r != 0 else 0 """ Max frequency prediction. """ @staticmethod def max_freq_predict(sample): predict = [] for items in sample: predict.append(Counter(items).most_common(1)[0][0]) return predict @staticmethod def exp_decay_predict(sample, decay_rate=0.8): predict = [] for items in sample: item_dict = {} curr_weight = 1.0 for item in items[::-1]: item_dict[item] = item_dict.get(item, 0) + curr_weight curr_weight *= decay_rate predict.append(sorted(item_dict.items(), key=lambda x_: x_[1])[-1][0]) return predict @staticmethod def expand_list(nested_list): for item in nested_list: if isinstance(item, (list, tuple)): for sub_item in Evaluator.expand_list(item): yield sub_item else: yield item @staticmethod def nested_list(items, seq_lens): num_items = len(items) trans_items = [[] for _ in range(0, num_items)] count = 0 for jdx in range(0, len(seq_lens)): for idx in range(0, num_items): trans_items[idx].append(items[idx][count:count + seq_lens[jdx]]) count += seq_lens[jdx] return trans_items
18,042
39.364653
123
py
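Much of the bookkeeping in process.py runs through Evaluator's flatten/unflatten helpers. A quick sanity-check sketch, assuming the class imports cleanly from this file:

from utils.process import Evaluator

nested = [[1, [2, 3]], [4]]
flat = list(Evaluator.expand_list(nested))             # [1, 2, 3, 4]
back = Evaluator.nested_list([flat], [3, 1])           # [[[1, 2, 3], [4]]]
vote = Evaluator.max_freq_predict([['a', 'a', 'b']])   # ['a']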
CaBERT-SLU
CaBERT-SLU-main/baseline_stackprop/utils/loader.py
""" @Author : Lee, Qin @StartTime : 2018/08/13 @Filename : loader.py @Software : Pycharm @Framework : Pytorch @LastModify : 2019/05/07 """ import os import numpy as np from copy import deepcopy from collections import Counter from collections import OrderedDict from ordered_set import OrderedSet from torch.utils.data import Dataset from torch.utils.data import DataLoader class Alphabet(object): """ Storage and serialization a set of elements. """ def __init__(self, name, if_use_pad, if_use_unk): self.__name = name self.__if_use_pad = if_use_pad self.__if_use_unk = if_use_unk self.__index2instance = OrderedSet() self.__instance2index = OrderedDict() # Counter Object record the frequency # of element occurs in raw text. self.__counter = Counter() if if_use_pad: self.__sign_pad = "<PAD>" self.add_instance(self.__sign_pad) if if_use_unk: self.__sign_unk = "<UNK>" self.add_instance(self.__sign_unk) @property def name(self): return self.__name def add_instance(self, instance): """ Add instances to alphabet. 1, We support any iterative data structure which contains elements of str type. 2, We will count added instances that will influence the serialization of unknown instance. :param instance: is given instance or a list of it. """ if isinstance(instance, (list, tuple)): for element in instance: self.add_instance(element) return # We only support elements of str type. assert isinstance(instance, str) # count the frequency of instances. self.__counter[instance] += 1 if instance not in self.__index2instance: self.__instance2index[instance] = len(self.__index2instance) self.__index2instance.append(instance) def get_index(self, instance): """ Serialize given instance and return. For unknown words, the return index of alphabet depends on variable self.__use_unk: 1, If True, then return the index of "<UNK>"; 2, If False, then return the index of the element that hold max frequency in training data. :param instance: is given instance or a list of it. :return: is the serialization of query instance. """ if isinstance(instance, (list, tuple)): return [self.get_index(elem) for elem in instance] assert isinstance(instance, str) try: return self.__instance2index[instance] except KeyError: if self.__if_use_unk: return self.__instance2index[self.__sign_unk] else: max_freq_item = self.__counter.most_common(1)[0][0] return self.__instance2index[max_freq_item] def get_instance(self, index): """ Get corresponding instance of query index. if index is invalid, then throws exception. :param index: is query index, possibly iterable. :return: is corresponding instance. """ if isinstance(index, list): return [self.get_instance(elem) for elem in index] return self.__index2instance[index] def save_content(self, dir_path): """ Save the content of alphabet to files. There are two kinds of saved files: 1, The first is a list file, elements are sorted by the frequency of occurrence. 2, The second is a dictionary file, elements are sorted by it serialized index. :param dir_path: is the directory path to save object. """ # Check if dir_path exists. 
if not os.path.exists(dir_path): os.mkdir(dir_path) list_path = os.path.join(dir_path, self.__name + "_list.txt") with open(list_path, 'w') as fw: for element, frequency in self.__counter.most_common(): fw.write(element + '\t' + str(frequency) + '\n') dict_path = os.path.join(dir_path, self.__name + "_dict.txt") with open(dict_path, 'w') as fw: for index, element in enumerate(self.__index2instance): fw.write(element + '\t' + str(index) + '\n') def __len__(self): return len(self.__index2instance) def __str__(self): return 'Alphabet {} contains about {} words: \n\t{}'.format(self.name, len(self), self.__index2instance) class TorchDataset(Dataset): """ Helper class implementing torch.utils.data.Dataset to instantiate DataLoader which deliveries data batch. """ def __init__(self, text, slot, intent): self.__text = text self.__slot = slot self.__intent = intent def __getitem__(self, index): return self.__text[index], self.__slot[index], self.__intent[index] def __len__(self): # Pre-check to avoid bug. assert len(self.__text) == len(self.__slot) assert len(self.__text) == len(self.__intent) return len(self.__text) class DatasetManager(object): def __init__(self, args): # Instantiate alphabet objects. self.__word_alphabet = Alphabet('word', if_use_pad=True, if_use_unk=True) self.__slot_alphabet = Alphabet('slot', if_use_pad=False, if_use_unk=False) self.__intent_alphabet = Alphabet('intent', if_use_pad=False, if_use_unk=False) # Record the raw text of dataset. self.__text_word_data = {} self.__text_slot_data = {} self.__text_intent_data = {} # Record the serialization of dataset. self.__digit_word_data = {} self.__digit_slot_data = {} self.__digit_intent_data = {} self.__args = args @property def test_sentence(self): return deepcopy(self.__text_word_data['test']) @property def word_alphabet(self): return deepcopy(self.__word_alphabet) @property def slot_alphabet(self): return deepcopy(self.__slot_alphabet) @property def intent_alphabet(self): return deepcopy(self.__intent_alphabet) @property def num_epoch(self): return self.__args.num_epoch @property def batch_size(self): return self.__args.batch_size @property def learning_rate(self): return self.__args.learning_rate @property def l2_penalty(self): return self.__args.l2_penalty @property def save_dir(self): return self.__args.save_dir @property def intent_forcing_rate(self): return self.__args.intent_forcing_rate @property def slot_forcing_rate(self): return self.__args.slot_forcing_rate def show_summary(self): """ :return: show summary of dataset, training parameters. """ print("Training parameters are listed as follows:\n") print('\tnumber of train sample: {};'.format(len(self.__text_word_data['train']))) print('\tnumber of dev sample: {};'.format(len(self.__text_word_data['dev']))) print('\tnumber of test sample: {};'.format(len(self.__text_word_data['test']))) print('\tnumber of epoch: {};'.format(self.num_epoch)) print('\tbatch size: {};'.format(self.batch_size)) print('\tlearning rate: {};'.format(self.learning_rate)) print('\trandom seed: {};'.format(self.__args.random_state)) print('\trate of l2 penalty: {};'.format(self.l2_penalty)) print('\trate of dropout in network: {};'.format(self.__args.dropout_rate)) print('\tteacher forcing rate(slot) {};'.format(self.slot_forcing_rate)) print('\tteacher forcing rate(intent): {};'.format(self.intent_forcing_rate)) print("\nEnd of parameters show. Save dir: {}.\n\n".format(self.save_dir)) def quick_build(self): """ Convenient function to instantiate a dataset object. 
""" train_path = os.path.join(self.__args.data_dir, 'train.txt') dev_path = os.path.join(self.__args.data_dir, 'dev.txt') test_path = os.path.join(self.__args.data_dir, 'test.txt') self.add_file(train_path, 'train', if_train_file=True) self.add_file(dev_path, 'dev', if_train_file=False) self.add_file(test_path, 'test', if_train_file=False) # Check if save path exists. if not os.path.exists(self.save_dir): os.mkdir(self.save_dir) alphabet_dir = os.path.join(self.__args.save_dir, "alphabet") self.__word_alphabet.save_content(alphabet_dir) self.__slot_alphabet.save_content(alphabet_dir) self.__intent_alphabet.save_content(alphabet_dir) def get_dataset(self, data_name, is_digital): """ Get dataset of given unique name. :param data_name: is name of stored dataset. :param is_digital: make sure if want serialized data. :return: the required dataset. """ if is_digital: return self.__digit_word_data[data_name], \ self.__digit_slot_data[data_name], \ self.__digit_intent_data[data_name] else: return self.__text_word_data[data_name], \ self.__text_slot_data[data_name], \ self.__text_intent_data[data_name] def add_file(self, file_path, data_name, if_train_file): text, slot, intent = self.__read_file(file_path) if if_train_file: self.__word_alphabet.add_instance(text) self.__slot_alphabet.add_instance(slot) self.__intent_alphabet.add_instance(intent) # Record the raw text of dataset. self.__text_word_data[data_name] = text self.__text_slot_data[data_name] = slot self.__text_intent_data[data_name] = intent # Serialize raw text and stored it. self.__digit_word_data[data_name] = self.__word_alphabet.get_index(text) if if_train_file: self.__digit_slot_data[data_name] = self.__slot_alphabet.get_index(slot) self.__digit_intent_data[data_name] = self.__intent_alphabet.get_index(intent) @staticmethod def __read_file(file_path): """ Read data file of given path. :param file_path: path of data file. :return: list of sentence, list of slot and list of intent. """ texts, slots, intents = [], [], [] text, slot = [], [] with open(file_path, 'r') as fr: for line in fr.readlines(): items = line.strip().split() if len(items) == 1: texts.append(text) slots.append(slot) intents.append(items) # clear buffer lists. text, slot = [], [] elif len(items) == 2: text.append(items[0].strip()) slot.append(items[1].strip()) return texts, slots, intents def batch_delivery(self, data_name, batch_size=None, is_digital=True, shuffle=True): if batch_size is None: batch_size = self.batch_size if is_digital: text = self.__digit_word_data[data_name] slot = self.__digit_slot_data[data_name] intent = self.__digit_intent_data[data_name] else: text = self.__text_word_data[data_name] slot = self.__text_slot_data[data_name] intent = self.__text_intent_data[data_name] dataset = TorchDataset(text, slot, intent) return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=self.__collate_fn) @staticmethod def add_padding(texts, items=None, digital=True): len_list = [len(text) for text in texts] max_len = max(len_list) # Get sorted index of len_list. sorted_index = np.argsort(len_list)[::-1] trans_texts, seq_lens, trans_items = [], [], None if items is not None: trans_items = [[] for _ in range(0, len(items))] for index in sorted_index: seq_lens.append(deepcopy(len_list[index])) trans_texts.append(deepcopy(texts[index])) if digital: trans_texts[-1].extend([0] * (max_len - len_list[index])) else: trans_texts[-1].extend(['<PAD>'] * (max_len - len_list[index])) # This required specific if padding after sorting. 
if items is not None: for item, (o_item, required) in zip(trans_items, items): item.append(deepcopy(o_item[index])) if required: if digital: item[-1].extend([0] * (max_len - len_list[index])) else: item[-1].extend(['<PAD>'] * (max_len - len_list[index])) if items is not None: return trans_texts, trans_items, seq_lens, sorted_index else: return trans_texts, seq_lens, sorted_index @staticmethod def __collate_fn(batch): """ helper function to instantiate a DataLoader Object. """ n_entity = len(batch[0]) modified_batch = [[] for _ in range(0, n_entity)] for idx in range(0, len(batch)): for jdx in range(0, n_entity): modified_batch[jdx].append(batch[idx][jdx]) return modified_batch
13,758
32.31477
112
py
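Alphabet in loader.py is the index/instance bijection used for words, slots, and intents. A minimal usage sketch (import path assumed from the package layout):

from utils.loader import Alphabet

vocab = Alphabet('word', if_use_pad=True, if_use_unk=True)
vocab.add_instance(['hello', 'world', 'hello'])
vocab.get_index('hello')   # 2 (<PAD> = 0 and <UNK> = 1 are added first)
vocab.get_index('unseen')  # 1, unknown words fall back to <UNK>
vocab.get_instance(3)      # 'world'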
CaBERT-SLU
CaBERT-SLU-main/data/dialogue_data.py
import torch as t from torch.autograd import Variable import numpy as np import pandas as pd import re import pickle import h5py import json import os import csv import spacy from nltk.tokenize import word_tokenize from transformers import BertTokenizer, BertModel, BertForMaskedLM import time class Data: def __init__(self, data_path, rawdata_path, intent2id_path): self.data_path = data_path self.rawdata_path = rawdata_path self.intent2id_path = intent2id_path self.REPLACE_BY_SPACE_RE = re.compile(r'[/(){}\[\]\|@,;]') self.BAD_SYMBOLS_RE = re.compile(r'[^0-9a-z #+_]') self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) #==================================================# # Text Prepare # #==================================================# #pure virtual function def prepare_text(self): raise NotImplementedError("Please define virtual function!!") # prepare text def text_prepare(self, text, mode): """ text: a string return: modified string """ text = text.lower() # lowercase text text = re.sub(self.REPLACE_BY_SPACE_RE, ' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text text = re.sub(self.BAD_SYMBOLS_RE, '', text) # delete symbols which are in BAD_SYMBOLS_RE from text text = re.sub(r"[ ]+", " ", text) text = re.sub(r"\!+", "!", text) text = re.sub(r"\,+", ",", text) text = re.sub(r"\?+", "?", text) if mode == "Bert": text = "[CLS] " + text + " [SEP]" tokenized_text = self.tokenizer.tokenize(text) tokenized_ids = self.tokenizer.convert_tokens_to_ids(tokenized_text) text = tokenized_ids return text ################################## class E2EData(Data): def __init__(self, data_path, rawdata_path, intent2id_path, slot2id_path, done=True): super(E2EData, self).__init__(data_path, rawdata_path, intent2id_path) self.slot2id_path = slot2id_path self.train_data, self.intent2id, self.slot2id = self.prepare_dialogue(done) self.num_labels = len(self.intent2id) def get_tags(self, slot_name, string): tags = [] slot_words = word_tokenize(string.lower()) for i, slot in enumerate(slot_words): if i == 0: tags.append('B-'+slot_name) else: tags.append('I-'+slot_name) if len(slot_words) > 0: return slot_words[0], (tags, ' '.join(slot_words)) else: return None, None def modify_slots(self, slots): slot_dic = {} for slot_pair in slots: slot_n = slot_pair[0].strip() if slot_n != 'other' and slot_n != 'description': if slot_pair[1].find('{') == -1: # only one slot value key, value = self.get_tags(slot_n, slot_pair[1]) if key: slot_dic[key] = value else: # more than one slot value strings = slot_pair[1][1:-1].split('#') for string in strings: key, value = self.get_tags(slot_n, string) if key: slot_dic[key] = value return slot_dic def text_prepare_tag(self, tokens, text_labels): """Auxiliary function for parsing tokens. 
@param tokens: raw tokens @param text_labels: raw_labels """ tokenized_sentence = [] labels = [] # Reparse the labels in parallel with the results after Bert tokenization for word, label in zip(tokens, text_labels): tokenized_word = self.tokenizer.tokenize(word) n_subwords = len(tokenized_word) tokenized_sentence.extend(tokenized_word) if label.find('B-') != -1: labels.extend([label]) labels.extend(['I-'+label[2:]] * (n_subwords-1)) else: labels.extend([label] * n_subwords) tokenized_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]']+tokenized_sentence+['[SEP]']) labels = ['[PAD]']+labels+['[PAD]'] return tokenized_sentence, tokenized_ids, labels def prepare(self, data_path, intent2id, counter, slot2id, scounter): print('Parsing file: ', data_path) all_data = [] data = [] prev_id = '1' with open(self.data_path+data_path, 'r') as f: for i, line in enumerate(f): if i == 0: continue infos = line.split('\t') dialogue_id = infos[0] message_id = infos[1] speaker = infos[3] text = infos[4] intents = [] slots = [] for act in infos[5:]: if act[:act.find('(')] != '': intents.append(act[:act.find('(')]) s = re.findall('\((.*)\)', act) if s: slots.append(s[0].split(';')) ############################### single intent ############################### # intents = "@".join(sorted(intents)) # if intents not in intent2id: # intent2id[intents] = counter # counter += 1 # intents = intent2id[intents] ############################### multi intents ############################### for intent in intents: if intent not in intent2id: intent2id[intent] = (counter, self.text_prepare(intent, 'Bert')) # counter counter += 1 intents = [intent2id[intent][0] for intent in intents] intents = list(set(intents)) #################################### slots ################################### text = word_tokenize(text.lower()) if len(slots) == 0: final_tags = ['O']*len(text) else: if len(slots) == 1: slots_split = [slot.split('=') for slot in slots[0] if len(slot.split('=')) == 2] else: news = [] for slot in slots: news.extend(slot) slots_split = [slot.split('=') for slot in news if len(slot.split('=')) == 2] slot_dic = self.modify_slots(slots_split) final_tags = [] cc = 0 for i, word in enumerate(text): if i < cc: continue if word in slot_dic and ' '.join(text[i:i+len(slot_dic[word][0])]) == slot_dic[word][1]: final_tags.extend(slot_dic[word][0]) cc += len(slot_dic[word][0]) else: final_tags.append('O') cc += 1 if data and prev_id != dialogue_id: all_data.append(data) data = [] prev_id = dialogue_id utt, utt_ids, final_tags = self.text_prepare_tag(text, final_tags) ############################ slots conver to ids ################################### for slot in final_tags: if slot not in slot2id: slot2id[slot] = scounter # counter scounter += 1 slots_ids = [slot2id[slot] for slot in final_tags] data.append((utt_ids, slots_ids, intents)) # data.append((utt, utt_ids, final_tags, slots_ids, intents)) # data.append((text, intents, slots)) return all_data, counter, scounter def prepare_dialogue(self, done): """ train_data: a list of dialogues for each dialogue: [(sent1, [label1, label2], [slot1, slot2]), (sent2, [label2], [slot2]),...] 
""" if done: with open(self.rawdata_path, "rb") as f: train_data = pickle.load(f) with open(self.intent2id_path, "rb") as f: intent2id = pickle.load(f) with open(self.slot2id_path, "rb") as f: slot2id = pickle.load(f) return train_data, intent2id, slot2id ptime = time.time() # if os.path.exists(self.intent2id_path): # with open(self.intent2id_path, "rb") as f: # intent2id = pickle.load(f) # counter = len(intent2id) # else: intent2id = {} counter = 0 slot2id = {} scounter = 0 all_data = [] for data_path in os.listdir(self.data_path): data, counter, scounter = self.prepare(data_path, intent2id, counter, slot2id, scounter) all_data += data with open(self.rawdata_path, "wb") as f: pickle.dump(all_data, f) with open(self.intent2id_path, "wb") as f: pickle.dump(intent2id, f) with open(self.slot2id_path, "wb") as f: pickle.dump(slot2id, f) print("Process time: ", time.time()-ptime) return all_data, intent2id, slot2id ############################################################################ class SGDData(Data): def __init__(self, data_path, rawdata_path, intent2id_path, slot2id_path, turn_path, done=True): super(SGDData, self).__init__(data_path, rawdata_path, intent2id_path) self.slot2id_path = slot2id_path self.turn_path = turn_path self.train_data, self.intent2id, self.slot2id, self.turn_data_all = self.prepare_dialogue(done) self.num_labels = len(self.intent2id) self.num_slot_labels = len(self.slot2id) def build_ids(self, items, item2id, counter): for item in items: if item not in item2id: item2id[item] = (counter, self.text_prepare(item, 'Bert')) # counter counter += 1 items = [item2id[item][0] for item in items] return items, item2id, counter def get_tags(self, slot_name, string): tags = [] slot_words = word_tokenize(string.lower()) for i, slot in enumerate(slot_words): if i == 0: tags.append('B-'+slot_name) else: tags.append('I-'+slot_name) if len(slot_words) > 0: return slot_words[0], (tags, ' '.join(slot_words)) else: return None, None def text_prepare_tag(self, tokens, text_labels): """Auxiliary function for parsing tokens. @param tokens: raw tokens @param text_labels: raw_labels """ tokenized_sentence = [] labels = [] # Reparse the labels in parallel with the results after Bert tokenization for word, label in zip(tokens, text_labels): tokenized_word = self.tokenizer.tokenize(word) n_subwords = len(tokenized_word) tokenized_sentence.extend(tokenized_word) if label.find('B-') != -1: labels.extend([label]) labels.extend(['I-'+label[2:]] * (n_subwords-1)) else: labels.extend([label] * n_subwords) tokenized_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]']+tokenized_sentence+['[SEP]']) labels = ['[PAD]']+labels+['[PAD]'] return tokenized_sentence, tokenized_ids, labels def prepare_dialogue(self, done): """ train_data: a list of dialogues (utterance-level) for each dialogue: [(sent1, [label1, label2], [slot1, slot2]), (sent2, [label2], [slot2]),...] a list of dialogues (turn-level) for each dialogue: [(turn1, intents1, requested_slots1, slots1, values1),... (turn2, intents2, requested_slots2, slots2, values2),...] 
""" if done: with open(self.rawdata_path, "rb") as f: train_data = pickle.load(f) with open(self.intent2id_path, "rb") as f: intent2id = pickle.load(f) with open(self.slot2id_path, "rb") as f: slot2id = pickle.load(f) with open(self.turn_path, "rb") as f: turn_data_all = pickle.load(f) return train_data, intent2id, slot2id, turn_data_all ptime = time.time() # if os.path.exists(self.intent2id_path): # with open(self.intent2id_path, "rb") as f: # intent2id = pickle.load(f) # counter = len(intent2id) # else: intent2id = {} counter = 0 aintent2id = {} acounter = 0 request2id = {} rcounter = 0 slot2id = {} scounter = 0 all_data = [] all_data_turn = [] services = [] for file in sorted(os.listdir(self.data_path))[:-1]: with open(os.path.join(self.data_path, file), 'r') as f: print('Parsing file: ', file) raw_data = json.load(f) for dialogue in raw_data: # if len(dialogue['services']) == 1: # continue # utterance data data = [] # turn data prev_text = 'this is a dummy sentence' prev_data = ('', '', '') data_turn = [] for turns in dialogue['turns']: ###################### utterance ########################## intents = [] slots = [] for action in turns['frames'][0]['actions']: intents.append(action['act']) slots.append((action['slot'], action['values'])) intents = list(set(intents)) # single intent # intents = "@".join(intents) # if intents not in intent2id: # intent2id[intents] = counter # counter += 1 # intents = intent2id[intents] ###################### multi intents ###################### for intent in intents: if intent not in intent2id: intent2id[intent] = (counter, self.text_prepare(intent, 'Bert')) # counter counter += 1 intents = [intent2id[intent][0] for intent in intents] # slot values number if 'slots' in turns['frames'][0]: slot_nums = turns['frames'][0]['slots'] else: slot_nums = [] ###################### slots ###################### utt = turns['utterance'] utt_token = word_tokenize(utt.lower()) slot_dic = {} if len(slot_nums) == 0: final_tags = ['O']*len(utt_token) else: for slot_dic_example in slot_nums: start = slot_dic_example['start'] end = slot_dic_example['exclusive_end'] slot_name = slot_dic_example['slot'] slot_words = utt[start:end] key, value = self.get_tags(slot_name, slot_words) if key: slot_dic[key] = value final_tags = [] rc = 0 for i, word in enumerate(utt_token): if i < rc: continue if word in slot_dic and ' '.join(utt_token[i:i+len(slot_dic[word][0])]) == slot_dic[word][1]: final_tags.extend(slot_dic[word][0]) rc += len(slot_dic[word][0]) else: final_tags.append('O') rc += 1 utt, utt_ids, final_tags = self.text_prepare_tag(utt_token, final_tags) ############################ slots conver to ids ################################### for slot in final_tags: if slot not in slot2id: slot2id[slot] = scounter # counter scounter += 1 slots_ids = [slot2id[slot] for slot in final_tags] # data.append((self.text_prepare(turns['utterance'], 'Bert'), intents, slots)) data.append((utt_ids, slots_ids, intents)) # data.append((utt_token, utt_ids, slot_nums, slots_ids, intents)) ###################### turn ########################## if 'state' in turns['frames'][0]: slot_values = turns['frames'][0]['state']['slot_values'] if not slot_values: s_turn = [] v_turn = [] else: s_turn, v_turn = zip(*[(k,v[0]) for k, v in slot_values.items()]) encoded = self.tokenizer.encode_plus(prev_text, text_pair=turns['utterance'], return_tensors='pt') aintents, aintent2id, acounter = self.build_ids([turns['frames'][0]['state']['active_intent']], aintent2id, acounter) requests, request2id, rcounter = 
self.build_ids(turns['frames'][0]['state']['requested_slots'], request2id, rcounter) data_turn.append((encoded['input_ids'], aintents, requests, s_turn, v_turn, (prev_data, data[-1]))) prev_text = turns['utterance'] else: prev_text = turns['utterance'] prev_data = data[-1] all_data.append(data) all_data_turn.append(data_turn) services.append(dialogue['services']) with open(self.rawdata_path, "wb") as f: pickle.dump(all_data, f) with open(self.intent2id_path, "wb") as f: pickle.dump(intent2id, f) with open(self.slot2id_path, "wb") as f: pickle.dump(slot2id, f) with open("sgd_dialogue/services.pkl", "wb") as f: pickle.dump(services, f) turn_data_all = {'turns': all_data_turn, 'aintent2id': aintent2id, 'request2id': request2id} with open(self.turn_path, "wb") as f: pickle.dump(turn_data_all, f) print("Process time: ", time.time()-ptime) return all_data, intent2id, slot2id, turn_data_all if __name__ == "__main__": if not os.path.exists('e2e_dialogue/'): os.mkdir('e2e_dialogue/') if not os.path.exists('sgd_dialogue/'): os.mkdir('sgd_dialogue/') # e2e dataset data_path = "../raw_datasets/e2e_dialogue/" rawdata_path = "e2e_dialogue/dialogue_data_multi_with_slots.pkl" intent2id_path = "e2e_dialogue/intent2id_multi_with_tokens.pkl" slot2id_path = "e2e_dialogue/slot2id.pkl" data = E2EData(data_path, rawdata_path, intent2id_path, slot2id_path, done=False) # print(data.intent2id) # print(data.slot2id) # for utt, utt_ids, slot, slot_ids, intents in data.train_data[10]: # print(utt) # print(utt_ids) # print(slot) # print(slot_ids) # print(intents) # print('--------------') # for utt_ids, slot_ids, intents in data.train_data[10]: # print(utt_ids) # print(slot_ids) # print(intents) # print('--------------') # sgd dataset data_path = "../raw_datasets/dstc8-schema-guided-dialogue/train" rawdata_path = "sgd_dialogue/dialogue_data_multi_with_slots.pkl" intent2id_path = "sgd_dialogue/intent2id_multi_with_tokens.pkl" slot2id_path = "sgd_dialogue/slot2id.pkl" turn_path = "sgd_dialogue/turns.pkl" data = SGDData(data_path, rawdata_path, intent2id_path, slot2id_path, turn_path, done=False) # print(data.turn_data_all['turns'][0]) # print(data.train_data[100]) # print(data.intent2id) # print(data.slot2id) # for utt_token, utt_ids, slot_nums, slots_ids, intents in data.train_data[10]: # print(utt_token) # print(utt_ids) # print(slot_nums) # print(slots_ids) # print(intents) # print('--------------') # for utt_ids, slot_ids, intents in data.train_data[10]: # print(utt_ids) # print(slot_ids) # print(intents) # print('--------------')
22,069
38.837545
145
py
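The `text_prepare_tag` helper above re-aligns word-level BIO slot tags with BERT's subword tokenization: the first subword of a `B-` word keeps its tag, and the remaining subwords receive the matching `I-` tag. A minimal standalone sketch of that alignment rule, assuming only that `transformers` and the `bert-base-uncased` vocabulary are available:

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)

def align_subword_tags(words, tags):
    """Expand word-level BIO tags so that every WordPiece gets a label."""
    sub_tokens, sub_tags = [], []
    for word, tag in zip(words, tags):
        pieces = tokenizer.tokenize(word)
        sub_tokens.extend(pieces)
        if tag.startswith('B-'):
            # Only the first piece keeps B-; continuation pieces become I-.
            sub_tags.extend([tag] + ['I-' + tag[2:]] * (len(pieces) - 1))
        else:
            sub_tags.extend([tag] * len(pieces))
    return sub_tokens, sub_tags

tokens, labels = align_subword_tags(['book', 'laketown'], ['O', 'B-city'])
# If 'laketown' splits into several WordPieces, only the first keeps B-city.
print(list(zip(tokens, labels)))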
CaBERT-SLU
CaBERT-SLU-main/model/transformer_new.py
"""Transformer module with masks""" import torch import torch.nn as nn import numpy as np class ScaledDotProductAttention(nn.Module): """Scaled dot-product attention mechanism. """ def __init__(self, attention_dropout=0.0): super(ScaledDotProductAttention, self).__init__() self.dropout = nn.Dropout(attention_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v, cross1, cross2, scale=None, attn_mask=None): """ Args: q: Queries [B, L_q, D_q] k: Keys [B, L_k, D_k] v: Values [B, L_v, D_v] scale: 1/sqrt(dk) attn_mask: [B, L_q, L_k] Returns: context, attention """ attention = torch.bmm(q, k.transpose(1, 2)) if scale: attention = attention * scale attention = attention.masked_fill_(attn_mask, -np.inf) attention = self.softmax(attention) attention = self.dropout(attention) context = torch.bmm(attention, v) attention_score = torch.bmm(cross1, cross2.transpose(1,2)) return context, attention_score class Transformer(nn.Module): """Transformer module. """ def __init__(self, hidden_dim, model_dim=512, num_heads=8, dropout=0.0): super(Transformer, self).__init__() self.dim_per_head = model_dim // num_heads self.num_heads = num_heads self.linear_k = nn.Linear(hidden_dim, self.dim_per_head * num_heads) self.linear_v = nn.Linear(hidden_dim, self.dim_per_head * num_heads) self.linear_q = nn.Linear(hidden_dim, self.dim_per_head * num_heads) self.dot_product_attention = ScaledDotProductAttention(dropout) self.linear_final = nn.Linear(model_dim, model_dim) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(model_dim) # cross attention mechanism self.embed_k = nn.Linear(hidden_dim, 200) self.embed_q = nn.Linear(hidden_dim, 200) def forward(self, key, value, query, masks=None): # Padding mask: Input size: (B, T) len_q = masks.size(1) pad_mask = masks.eq(0) attn_mask = pad_mask.unsqueeze(1).expand(-1, len_q, -1) attn_mask1 = masks.unsqueeze(1).expand(-1, len_q, -1) attn_mask2 = masks.unsqueeze(2).expand(-1, -1, len_q) attn_mask3 = (attn_mask1*attn_mask2).eq(0) residual = query dim_per_head = self.dim_per_head num_heads = self.num_heads batch_size = key.size(0) # cross attention cross1 = self.embed_k(key) cross2 = self.embed_q(query) # linear projection key = self.linear_k(key) value = self.linear_v(value) query = self.linear_q(query) # split by heads key = key.view(batch_size * num_heads, -1, dim_per_head) value = value.view(batch_size * num_heads, -1, dim_per_head) query = query.view(batch_size * num_heads, -1, dim_per_head) attn_mask = attn_mask.repeat(num_heads, 1, 1) # scaled dot product attention scale = (key.size(-1) // num_heads) ** -0.5 context, attention = self.dot_product_attention( query, key, value, cross1, cross2, scale, attn_mask) # concat heads context = context.view(batch_size, -1, dim_per_head * num_heads) output = torch.cat([residual, context], dim=2) # average attention over head # attention = attention.view(batch_size, num_heads, len_q, len_q) # attention = torch.mean(attention, dim=1) attention = attention.masked_fill(attn_mask3, 0.) attention = nn.Softmax(dim=2)(attention) #print(attn_mask3[0]) # attention = attention*attn_mask3 return output, attention
3,804
32.672566
76
py
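A quick shape check for the `Transformer` wrapper above: it consumes (batch, turns, hidden_dim) features plus a (batch, turns) padding mask, and returns the residual concatenated with the attended context alongside a turn-level attention map. A minimal smoke test under those assumptions (the import path follows this repo's layout):

import torch
from model.transformer_new import Transformer

model = Transformer(hidden_dim=768, model_dim=256, num_heads=2, dropout=0.1)
x = torch.randn(4, 10, 768)   # (batch, turns, hidden_dim) utterance features
masks = torch.ones(4, 10)     # (batch, turns); zeros would mark padded turns

out, attn = model(x, x, x, masks)
print(out.shape)   # torch.Size([4, 10, 1024]): residual (768) + context (256)
print(attn.shape)  # torch.Size([4, 10, 10]): turn-to-turn attention weights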
CaBERT-SLU
CaBERT-SLU-main/model/baseline_multi.py
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np from transformers import BertTokenizer, BertModel class MULTI(nn.Module): def __init__(self, opt, num_labels=2, num_slot_labels=10): super(MULTI, self).__init__() self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) self.embedding = nn.Embedding(len(self.tokenizer.vocab), 64) self.rnn_sentence = nn.LSTM(input_size=64, hidden_size=64, bidirectional=True, batch_first=True, num_layers=1) self.decoder = AttnDecoderRNN(64, opt) self.slot_decoder = AttnDecoderRNN(64, opt) self.classifier1 = nn.Linear(128, num_labels) nn.init.xavier_normal_(self.classifier1.weight) self.classifier2 = nn.Linear(128, num_labels) nn.init.xavier_normal_(self.classifier2.weight) self.classifier_slot = nn.Linear(128, num_slot_labels) nn.init.xavier_normal_(self.classifier_slot.weight) #self.dropout = nn.Dropout(0.1) self.num_labels = num_labels self.num_slot_labels = num_slot_labels self.opt = opt def forward(self, x_inputs): # Encoder X = self.embedding(x_inputs) rnn_out, encoder_hidden = self.rnn_sentence(X) #rnn_out = self.dropout(rnn_out) logits = self.classifier1(rnn_out[:,-1,:]) encoder_logits = logits # Decoder decoder_hidden = encoder_hidden decoder_outputs = torch.zeros(*rnn_out.shape, device=self.device) for di in range(x_inputs.shape[1]): decoder_output, decoder_hidden = self.decoder(decoder_hidden, rnn_out, di) decoder_outputs[:,di,:] = decoder_output.squeeze(1) #decoder_outputs = self.dropout(decoder_outputs) decoder_logits = self.classifier2(decoder_outputs) # Slot Decoder decoder_hidden = encoder_hidden slot_outputs = torch.zeros(*rnn_out.shape, device=self.device) for di in range(x_inputs.shape[1]): decoder_output, decoder_hidden = self.slot_decoder(decoder_hidden, rnn_out, di) slot_outputs[:,di,:] = decoder_output.squeeze(1) #decoder_outputs = self.dropout(decoder_outputs) slot_logits = self.classifier_slot(slot_outputs) slot_logits = slot_logits.view(-1, self.num_slot_labels) return encoder_logits, decoder_logits, slot_logits class AttnDecoderRNN(nn.Module): def __init__(self, hidden_size, opt): super(AttnDecoderRNN, self).__init__() self.hidden_size = 64 self.max_length = opt.maxlen self.attn = nn.Linear(self.hidden_size * 4, 1) self.attn_combine = nn.Linear(self.hidden_size * 4, self.hidden_size) self.rnn_token = nn.LSTM(input_size=self.hidden_size, hidden_size=self.hidden_size, bidirectional=True, batch_first=True, num_layers=1) self.W = nn.Parameter(torch.zeros(self.hidden_size*2,1)) self.v = nn.Parameter(torch.zeros(1)) def forward(self, hidden, encoder_outputs, di): b, t, h = encoder_outputs.shape # repeat decoder hidden decoder_hidden = hidden[0].view(-1, 128) # (b,2h) hidden_repeat = decoder_hidden.unsqueeze(1) # (b,1,2h) hidden_repeat = hidden_repeat.repeat(1,t,1) # (b,t,2h) # attention attn_weights = self.attn(torch.cat((encoder_outputs, hidden_repeat), 2)) # (b,t,1) attn_weights = F.softmax(attn_weights, dim=1) # (b,t,1) attn_applied = torch.bmm(encoder_outputs.transpose(2,1), attn_weights).squeeze(2) # (b,2h) # # slot-gated: # print(attn_applied.shape) # print(encoder_outputs[:,-1,:].shape) # print(self.W.shape) # print(self.v.shape) # g = torch.sum(self.v * torch.tanh(attn_applied + encoder_outputs[:,-1,:] * self.W), dim=1) # (b,) # g = g.expand(dim=1).repeat(1,1,2*h) # (b,1) output = torch.cat((encoder_outputs[:,di,:], attn_applied), dim=1) # (b,4h) # 
linear layer output = self.attn_combine(output) # (b,h) output = F.relu(output) output = output.unsqueeze(1) # (b,1,h) output, hidden = self.rnn_token(output, hidden) return output, hidden
4,648
39.426087
107
py
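The `MULTI` baseline above reads only `maxlen` from its options object; all other sizes are inferred from the input. A minimal forward pass with hypothetical dimensions (a `SimpleNamespace` stands in for the real option parser):

import torch
from types import SimpleNamespace
from model.baseline_multi import MULTI

opt = SimpleNamespace(maxlen=20)       # only `maxlen` is read by the decoders
model = MULTI(opt, num_labels=5, num_slot_labels=12)
model = model.to(model.device)         # keep module and inputs on one device

x = torch.randint(1, 1000, (2, 20), device=model.device)  # (batch, seq_len) ids
enc_logits, dec_logits, slot_logits = model(x)
print(enc_logits.shape)   # (2, 5): utterance-level intent logits
print(dec_logits.shape)   # (2, 20, 5): per-token intent logits
print(slot_logits.shape)  # (40, 12): per-token slot logits, flattened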
CaBERT-SLU
CaBERT-SLU-main/model/CHAN.py
import os.path import math import copy import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from torch.nn import CosineEmbeddingLoss ffscores = [] class MultiHeadAttention(nn.Module): def __init__(self, heads, d_model, dropout=0.1): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) self.out = nn.Linear(d_model, d_model) self.scores = None def attention(self, q, k, v, d_k, mask=None, dropout=None): scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1e9) scores = F.softmax(scores, dim=-1) if dropout is not None: scores = dropout(scores) self.scores = scores ffscores.append(self.scores.cpu()) output = torch.matmul(scores, v) return output def forward(self, q, k, v, mask=None): bs = q.size(0) # perform linear operation and split into h heads k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) # transpose to get dimensions bs * h * sl * d_model k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) scores = self.attention(q, k, v, self.d_k, mask, self.dropout) # concatenate heads and put through final linear layer concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model) output = self.out(concat) return output def get_scores(self): return self.scores def clones(module, N): "Produce N identical layers." return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class SublayerConnection(nn.Module): """ A residual connection followed by a layer norm. Note for code simplicity the norm is first as opposed to last. """ def __init__(self, size, dropout): super(SublayerConnection, self).__init__() self.norm = nn.LayerNorm(size) self.dropout = nn.Dropout(dropout) def forward(self, x, sublayer): "Apply residual connection to any sublayer with the same size." return x + self.dropout(sublayer(self.norm(x))) class Encoder(nn.Module): "Generic N layer decoder with masking." def __init__(self, layer, N): super(Encoder, self).__init__() self.layers = clones(layer, N) self.norm = nn.LayerNorm(layer.size) def forward(self, x, mask): for layer in self.layers: x = layer(x, mask) return self.norm(x) class EncoderLayer(nn.Module): "Encoder is made up of self-attn and feed forward (defined below)" def __init__(self, size, self_attn, feed_forward, dropout): super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(size, dropout), 2) self.size = size def forward(self, x, mask): "Follow Figure 1 (left) for connections." x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask)) return self.sublayer[1](x, self.feed_forward) def subsequent_mask(size): "Mask out subsequent positions." attn_shape = (1, size, size) subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8') return torch.from_numpy(subsequent_mask) == 0 class PositionwiseFeedForward(nn.Module): "Implements FFN equation." 
def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class PositionalEncoding(nn.Module): "Implement the PE function." def __init__(self, d_model, dropout, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) # Compute the positional encodings once in log space. pe = torch.zeros(max_len, d_model, requires_grad=False) position = torch.arange(0., max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = x + self.pe[:, :x.size(1)] return self.dropout(x) class ContextAttention(nn.Module): def __init__(self, device): super(ContextAttention, self).__init__() self.hidden_dim = 100 self.attn_head = 4 self.device = device ### Attention layer self.attn = MultiHeadAttention(self.attn_head, 768, dropout=0.) self.attn2 = MultiHeadAttention(self.attn_head, 768, dropout=0.) self.add_pe = PositionalEncoding(768, 0.) ### Belief Tracker self.nbt = Encoder(EncoderLayer(768, MultiHeadAttention(self.attn_head, 768, dropout=0.), PositionwiseFeedForward(768, self.hidden_dim, 0.), 0.1), N=6) def _make_aux_tensors(self, ids, len): token_type_ids = torch.zeros(ids.size(), dtype=torch.long).to(self.device) for i in range(len.size(0)): for j in range(len.size(1)): if len[i,j,0] == 0: # padding break elif len[i,j,1] > 0: # escape only text_a case start = len[i,j,0] ending = len[i,j,0] + len[i,j,1] token_type_ids[i, j, start:ending] = 1 attention_mask = ids > 0 return token_type_ids, attention_mask def forward(self, input_ids, result_masks): ds = input_ids.size(0) # dialog size ts = input_ids.size(1) # turn size hidden = self.add_pe(input_ids) # NBT turn_mask = torch.Tensor(ds, ts, ts).byte().to(self.device) for d in range(ds): padding_utter = (result_masks[d,:].sum(-1) != 0) turn_mask[d] = padding_utter.unsqueeze(0).repeat(ts,1) & subsequent_mask(ts).to(self.device) hidden = self.nbt(hidden, turn_mask) return hidden, ffscores
6,881
33.238806
104
py
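`subsequent_mask` above is what restricts the dialogue-level encoder to past turns: position i may only attend to positions 0..i. Its output is easy to inspect in isolation:

import torch
from model.CHAN import subsequent_mask

# Lower-triangular boolean mask for 4 turns; row i allows attention to turns <= i.
print(subsequent_mask(4)[0].int())
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)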
CaBERT-SLU
CaBERT-SLU-main/model/baseline_eca.py
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np from transformers import BertTokenizer, BertModel class ECA(nn.Module): def __init__(self, opt, num_labels=2, num_slot_labels=10): super(ECA, self).__init__() self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) self.embedding = nn.Embedding(len(self.tokenizer.vocab), 256) self.utterance_encoder = nn.LSTM(input_size=256, hidden_size=256, bidirectional=True, batch_first=True, num_layers=1) self.conversation_layer = nn.LSTM(input_size=512, hidden_size=256, bidirectional=True, batch_first=True, num_layers=1) self.dense1 = nn.Linear(512, 256) self.dense2 = nn.Linear(256, num_labels) self.slot_decoder = AttnDecoderRNN(256, opt) self.classifier_slot = nn.Linear(512, num_slot_labels) nn.init.xavier_normal_(self.classifier_slot.weight) #self.dropout = nn.Dropout(0.1) self.num_labels = num_labels self.num_slot_labels = num_slot_labels self.dropout = nn.Dropout(0.1) self.opt = opt def forward(self, result_ids, result_token_masks, result_masks, lengths, result_slot_labels, labels, y_caps, y_masks): # Utterance Encoder b,d,t = result_ids.shape result_ids = result_ids.view(-1, t) X = self.embedding(result_ids) rnn_out, encoder_hidden = self.utterance_encoder(X) # pooling & conversation pooled = rnn_out[:,-1,:].view(b,d,2*256) out, hidden = self.conversation_layer(pooled) out = self.dense1(out) logits = self.dense2(out) # Remove padding logits_no_pad = [] labels_no_pad = [] for i in range(b): logits_no_pad.append(logits[i,:lengths[i],:]) labels_no_pad.append(labels[i,:lengths[i],:]) logits = torch.cat(logits_no_pad, dim=0) labels = torch.cat(labels_no_pad, dim=0) # Slot Decoder decoder_hidden = encoder_hidden slot_outputs = torch.zeros(*rnn_out.shape, device=self.device) for di in range(t): decoder_output, decoder_hidden = self.slot_decoder(decoder_hidden, rnn_out, di) slot_outputs[:,di,:] = decoder_output.squeeze(1) #decoder_outputs = self.dropout(decoder_outputs) slot_outputs = self.dropout(slot_outputs) slot_logits = self.classifier_slot(slot_outputs) slot_logits = slot_logits.view(-1, self.num_slot_labels) return logits, labels, slot_logits class AttnDecoderRNN(nn.Module): def __init__(self, hidden_size, opt): super(AttnDecoderRNN, self).__init__() self.hidden_size = 256 self.max_length = opt.maxlen self.attn = nn.Linear(self.hidden_size * 4, 1) self.attn_combine = nn.Linear(self.hidden_size * 4, self.hidden_size) self.rnn_token = nn.LSTM(input_size=self.hidden_size, hidden_size=self.hidden_size, bidirectional=True, batch_first=True, num_layers=1) self.W = nn.Parameter(torch.zeros(self.hidden_size*2,1)) self.v = nn.Parameter(torch.zeros(1)) def forward(self, hidden, encoder_outputs, di): b, t, h = encoder_outputs.shape # repeat decoder hidden decoder_hidden = hidden[0].view(-1, 2*self.hidden_size) # (b,2h) hidden_repeat = decoder_hidden.unsqueeze(1) # (b,1,2h) hidden_repeat = hidden_repeat.repeat(1,t,1) # (b,t,2h) # attention attn_weights = self.attn(torch.cat((encoder_outputs, hidden_repeat), 2)) # (b,t,1) attn_weights = F.softmax(attn_weights, dim=1) # (b,t,1) attn_applied = torch.bmm(encoder_outputs.transpose(2,1), attn_weights).squeeze(2) # (b,2h) output = torch.cat((encoder_outputs[:,di,:], attn_applied), dim=1) # (b,4h) # linear layer output = self.attn_combine(output) # (b,h) output = F.relu(output) output = output.unsqueeze(1) # (b,1,h) output, hidden = 
self.rnn_token(output, hidden) return output, hidden
4,601
37.033058
122
py
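`ECA.forward` above only consumes `result_ids`, `lengths`, and `labels`; the remaining arguments appear to exist for interface compatibility with the other models. A dimension walkthrough with hypothetical sizes:

import torch
from types import SimpleNamespace
from model.baseline_eca import ECA

opt = SimpleNamespace(maxlen=16)
model = ECA(opt, num_labels=5, num_slot_labels=12)
device = model.device                      # cuda if available, else cpu
model = model.to(device)

b, d, t = 2, 4, 16                         # dialogues, turns, tokens per turn
ids = torch.randint(1, 1000, (b, d, t), device=device)
lengths = torch.tensor([4, 3])             # real turn count per dialogue
labels = torch.zeros(b, d, 5, device=device)  # multi-hot intents per turn

logits, flat_labels, slot_logits = model(
    ids, None, None, lengths, None, labels, None, None)  # unused args -> None
print(logits.shape)       # (7, 5): 4 + 3 unpadded turns concatenated
print(slot_logits.shape)  # (128, 12): b * d * t token-level slot logits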
CaBERT-SLU
CaBERT-SLU-main/model/transformer.py
"""Transformer module with masks""" import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import TransformerEncoder, TransformerEncoderLayer class TransformerModel(nn.Module): def __init__(self, ninp, nhead, nhid, nlayers, dropout=0.5): super(TransformerModel, self).__init__() self.model_type = 'Transformer' self.pos_encoder = PositionalEncoding(ninp, dropout) encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout) self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) self.ninp = ninp self.decoder = nn.Linear(ninp, 256) self.init_weights() def generate_square_subsequent_mask(self, sz): mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask def init_weights(self): initrange = 0.1 self.decoder.bias.data.zero_() self.decoder.weight.data.uniform_(-initrange, initrange) def forward(self, src, src_mask): src = self.pos_encoder(src) output = self.transformer_encoder(src, src_mask) # output = self.decoder(output) return output class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0).transpose(0, 1) self.register_buffer('pe', pe) def forward(self, x): x = x + self.pe[:x.size(0), :] return self.dropout(x)
1,979
36.358491
100
py
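`TransformerModel` above follows the standard PyTorch (seq_len, batch, ninp) layout, and `generate_square_subsequent_mask` produces the additive causal mask its encoder expects. A minimal shape check:

import torch
from model.transformer import TransformerModel

model = TransformerModel(ninp=768, nhead=4, nhid=64, nlayers=2, dropout=0.1)
src = torch.randn(10, 2, 768)                     # (seq_len, batch, ninp)
mask = model.generate_square_subsequent_mask(10)  # -inf above the diagonal

out = model(src, mask)
print(out.shape)  # torch.Size([10, 2, 768]); the Linear decoder stays unused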
CaBERT-SLU
CaBERT-SLU-main/model/mia.py
import os.path import math import copy import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from torch.nn import CosineEmbeddingLoss class MultiHeadAttention(nn.Module): def __init__(self, heads, d_model, dropout=0.1): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) self.out = nn.Linear(d_model, d_model) self.scores = None def attention(self, q, k, v, d_k, mask=None, dropout=None): scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1e9) scores = F.softmax(scores, dim=-1) if dropout is not None: scores = dropout(scores) self.scores = scores output = torch.matmul(scores, v) return output def forward(self, q, k, v, mask=None): bs = q.size(0) # perform linear operation and split into h heads k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) # transpose to get dimensions bs * h * sl * d_model k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) scores = self.attention(q, k, v, self.d_k, mask, self.dropout) # concatenate heads and put through final linear layer concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model) output = self.out(concat) return output def get_scores(self): return self.scores def clones(module, N): "Produce N identical layers." return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class SublayerConnection(nn.Module): """ A residual connection followed by a layer norm. Note for code simplicity the norm is first as opposed to last. """ def __init__(self, size, dropout): super(SublayerConnection, self).__init__() self.norm = nn.LayerNorm(size) self.dropout = nn.Dropout(dropout) def forward(self, x, sublayer): "Apply residual connection to any sublayer with the same size." return self.norm(x[0] + self.dropout(sublayer(x))) class PositionwiseFeedForward(nn.Module): "Implements FFN equation." def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class EncoderLayer(nn.Module): "Encoder is made up of self-attn and feed forward (defined below)" def __init__(self, size, self_attn, feed_forward, dropout): super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(size, dropout), 2) self.size = size def forward(self, *args): "Follow Figure 1 (left) for connections." 
        x = self.sublayer[0](args, lambda inp: self.self_attn(inp[0], inp[1], inp[1]))
        return self.sublayer[1](x, self.feed_forward)


class MutualIterativeAttention(nn.Module):

    def __init__(self, device):
        super(MutualIterativeAttention, self).__init__()

        self.hidden_dim = 100
        self.attn_head = 4
        self.N = 2
        self.device = device

        self.layer_refine = EncoderLayer(768,
                                         MultiHeadAttention(self.attn_head, 768, dropout=0.),
                                         PositionwiseFeedForward(768, self.hidden_dim, 0.),
                                         0.1)

    def forward(self, enc_intent, enc_slot):
        # Alternately refine the two streams for N rounds; in each call the
        # first argument supplies the queries and the second the keys/values.
        for _ in range(self.N):
            # Refining intent
            enc_intent = self.layer_refine(enc_slot, enc_intent)
            # Refining slot
            enc_slot = self.layer_refine(enc_intent, enc_slot)
        return enc_slot
4,445
32.428571
103
py
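The mutual iterative attention module above alternates attention between the two streams for `N = 2` rounds. A shape-level sketch with hypothetical inputs (one pooled intent vector per utterance against token-level slot features):

import torch
from model.mia import MutualIterativeAttention

mia = MutualIterativeAttention(torch.device('cpu'))
intent_ctx = torch.randn(8, 1, 768)   # (utterances, 1, hidden): pooled intent context
slot_feats = torch.randn(8, 20, 768)  # (utterances, tokens, hidden): slot features

refined = mia(intent_ctx, slot_feats)
print(refined.shape)  # torch.Size([8, 20, 768]): refined slot representations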
CaBERT-SLU
CaBERT-SLU-main/model/__init__.py
from .bert_model_context import BertContextNLU from .baseline_multi import MULTI from .baseline_eca import ECA
111
27
46
py
CaBERT-SLU
CaBERT-SLU-main/model/torchcrf.py
"""SOURCE CODE FROM PYTORCH-CRF """ from typing import List, Optional import torch import torch.nn as nn class CRF(nn.Module): """Conditional random field. This module implements a conditional random field [LMP01]_. The forward computation of this class computes the log likelihood of the given sequence of tags and emission score tensor. This class also has `~CRF.decode` method which finds the best tag sequence given an emission score tensor using `Viterbi algorithm`_. Args: num_tags: Number of tags. batch_first: Whether the first dimension corresponds to the size of a minibatch. Attributes: start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size ``(num_tags,)``. end_transitions (`~torch.nn.Parameter`): End transition score tensor of size ``(num_tags,)``. transitions (`~torch.nn.Parameter`): Transition score tensor of size ``(num_tags, num_tags)``. .. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001). "Conditional random fields: Probabilistic models for segmenting and labeling sequence data". *Proc. 18th International Conf. on Machine Learning*. Morgan Kaufmann. pp. 282–289. .. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm """ def __init__(self, num_tags: int, batch_first: bool = False) -> None: if num_tags <= 0: raise ValueError(f'invalid number of tags: {num_tags}') super().__init__() self.num_tags = num_tags self.batch_first = batch_first self.start_transitions = nn.Parameter(torch.empty(num_tags)) self.end_transitions = nn.Parameter(torch.empty(num_tags)) self.transitions = nn.Parameter(torch.empty(num_tags, num_tags)) self.reset_parameters() def reset_parameters(self) -> None: """Initialize the transition parameters. The parameters will be initialized randomly from a uniform distribution between -0.1 and 0.1. """ nn.init.uniform_(self.start_transitions, -0.1, 0.1) nn.init.uniform_(self.end_transitions, -0.1, 0.1) nn.init.uniform_(self.transitions, -0.1, 0.1) def __repr__(self) -> str: return f'{self.__class__.__name__}(num_tags={self.num_tags})' def forward( self, emissions: torch.Tensor, tags: torch.LongTensor, mask: Optional[torch.ByteTensor] = None, reduction: str = 'sum', ) -> torch.Tensor: """Compute the conditional log likelihood of a sequence of tags given emission scores. Args: emissions (`~torch.Tensor`): Emission score tensor of size ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``, ``(batch_size, seq_length, num_tags)`` otherwise. tags (`~torch.LongTensor`): Sequence of tags tensor of size ``(seq_length, batch_size)`` if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise. mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)`` if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise. reduction: Specifies the reduction to apply to the output: ``none|sum|mean|token_mean``. ``none``: no reduction will be applied. ``sum``: the output will be summed over batches. ``mean``: the output will be averaged over batches. ``token_mean``: the output will be averaged over tokens. Returns: `~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if reduction is ``none``, ``()`` otherwise. 
""" self._validate(emissions, tags=tags, mask=mask) if reduction not in ('none', 'sum', 'mean', 'token_mean'): raise ValueError(f'invalid reduction: {reduction}') if mask is None: mask = torch.ones_like(tags, dtype=torch.uint8) if self.batch_first: emissions = emissions.transpose(0, 1) tags = tags.transpose(0, 1) mask = mask.transpose(0, 1) # shape: (batch_size,) numerator = self._compute_score(emissions, tags, mask) # shape: (batch_size,) denominator = self._compute_normalizer(emissions, mask) # shape: (batch_size,) llh = numerator - denominator if reduction == 'none': return llh if reduction == 'sum': return llh.sum() if reduction == 'mean': return llh.mean() assert reduction == 'token_mean' return llh.sum() / mask.float().sum() def decode(self, emissions: torch.Tensor, mask: Optional[torch.ByteTensor] = None) -> List[List[int]]: """Find the most likely tag sequence using Viterbi algorithm. Args: emissions (`~torch.Tensor`): Emission score tensor of size ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``, ``(batch_size, seq_length, num_tags)`` otherwise. mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)`` if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise. Returns: List of list containing the best tag sequence for each batch. """ self._validate(emissions, mask=mask) if mask is None: mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8) if self.batch_first: emissions = emissions.transpose(0, 1) mask = mask.transpose(0, 1) return self._viterbi_decode(emissions, mask) def _validate( self, emissions: torch.Tensor, tags: Optional[torch.LongTensor] = None, mask: Optional[torch.ByteTensor] = None) -> None: if emissions.dim() != 3: raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}') if emissions.size(2) != self.num_tags: raise ValueError( f'expected last dimension of emissions is {self.num_tags}, ' f'got {emissions.size(2)}') if tags is not None: if emissions.shape[:2] != tags.shape: raise ValueError( 'the first two dimensions of emissions and tags must match, ' f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}') if mask is not None: if emissions.shape[:2] != mask.shape: raise ValueError( 'the first two dimensions of emissions and mask must match, ' f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}') no_empty_seq = not self.batch_first and mask[0].all() no_empty_seq_bf = self.batch_first and mask[:, 0].all() if not no_empty_seq and not no_empty_seq_bf: raise ValueError('mask of the first timestep must all be on') def _compute_score( self, emissions: torch.Tensor, tags: torch.LongTensor, mask: torch.ByteTensor) -> torch.Tensor: # emissions: (seq_length, batch_size, num_tags) # tags: (seq_length, batch_size) # mask: (seq_length, batch_size) assert emissions.dim() == 3 and tags.dim() == 2 assert emissions.shape[:2] == tags.shape assert emissions.size(2) == self.num_tags assert mask.shape == tags.shape assert mask[0].all() seq_length, batch_size = tags.shape mask = mask.float() # Start transition score and first emission # shape: (batch_size,) score = self.start_transitions[tags[0]] score += emissions[0, torch.arange(batch_size), tags[0]] for i in range(1, seq_length): # Transition score to next tag, only added if next timestep is valid (mask == 1) # shape: (batch_size,) score += self.transitions[tags[i - 1], tags[i]] * mask[i] # Emission score for next tag, only added if next timestep is valid (mask == 1) # shape: (batch_size,) score += emissions[i, torch.arange(batch_size), 
tags[i]] * mask[i] # End transition score # shape: (batch_size,) seq_ends = mask.long().sum(dim=0) - 1 # shape: (batch_size,) last_tags = tags[seq_ends, torch.arange(batch_size)] # shape: (batch_size,) score += self.end_transitions[last_tags] return score def _compute_normalizer( self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor: # emissions: (seq_length, batch_size, num_tags) # mask: (seq_length, batch_size) assert emissions.dim() == 3 and mask.dim() == 2 assert emissions.shape[:2] == mask.shape assert emissions.size(2) == self.num_tags assert mask[0].all() seq_length = emissions.size(0) # Start transition score and first emission; score has size of # (batch_size, num_tags) where for each batch, the j-th column stores # the score that the first timestep has tag j # shape: (batch_size, num_tags) score = self.start_transitions + emissions[0] for i in range(1, seq_length): # Broadcast score for every possible next tag # shape: (batch_size, num_tags, 1) broadcast_score = score.unsqueeze(2) # Broadcast emission score for every possible current tag # shape: (batch_size, 1, num_tags) broadcast_emissions = emissions[i].unsqueeze(1) # Compute the score tensor of size (batch_size, num_tags, num_tags) where # for each sample, entry at row i and column j stores the sum of scores of all # possible tag sequences so far that end with transitioning from tag i to tag j # and emitting # shape: (batch_size, num_tags, num_tags) next_score = broadcast_score + self.transitions + broadcast_emissions # Sum over all possible current tags, but we're in score space, so a sum # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of # all possible tag sequences so far, that end in tag i # shape: (batch_size, num_tags) next_score = torch.logsumexp(next_score, dim=1) # Set score to the next score if this timestep is valid (mask == 1) # shape: (batch_size, num_tags) score = torch.where(mask[i].unsqueeze(1), next_score, score) # End transition score # shape: (batch_size, num_tags) score += self.end_transitions # Sum (log-sum-exp) over all possible tags # shape: (batch_size,) return torch.logsumexp(score, dim=1) def _viterbi_decode(self, emissions: torch.FloatTensor, mask: torch.ByteTensor) -> List[List[int]]: # emissions: (seq_length, batch_size, num_tags) # mask: (seq_length, batch_size) assert emissions.dim() == 3 and mask.dim() == 2 assert emissions.shape[:2] == mask.shape assert emissions.size(2) == self.num_tags assert mask[0].all() seq_length, batch_size = mask.shape # Start transition and first emission # shape: (batch_size, num_tags) score = self.start_transitions + emissions[0] history = [] # score is a tensor of size (batch_size, num_tags) where for every batch, # value at column j stores the score of the best tag sequence so far that ends # with tag j # history saves where the best tags candidate transitioned from; this is used # when we trace back the best tag sequence # Viterbi algorithm recursive case: we compute the score of the best tag sequence # for every possible next tag for i in range(1, seq_length): # Broadcast viterbi score for every possible next tag # shape: (batch_size, num_tags, 1) broadcast_score = score.unsqueeze(2) # Broadcast emission score for every possible current tag # shape: (batch_size, 1, num_tags) broadcast_emission = emissions[i].unsqueeze(1) # Compute the score tensor of size (batch_size, num_tags, num_tags) where # for each sample, entry at row i and column j stores the score of the best # tag sequence so far that ends with transitioning 
from tag i to tag j and emitting # shape: (batch_size, num_tags, num_tags) next_score = broadcast_score + self.transitions + broadcast_emission # Find the maximum score over all possible current tag # shape: (batch_size, num_tags) next_score, indices = next_score.max(dim=1) # Set score to the next score if this timestep is valid (mask == 1) # and save the index that produces the next score # shape: (batch_size, num_tags) score = torch.where(mask[i].unsqueeze(1), next_score, score) history.append(indices) # End transition score # shape: (batch_size, num_tags) score += self.end_transitions # Now, compute the best path for each sample # shape: (batch_size,) seq_ends = mask.long().sum(dim=0) - 1 best_tags_list = [] for idx in range(batch_size): # Find the tag which maximizes the score at the last timestep; this is our best tag # for the last timestep _, best_last_tag = score[idx].max(dim=0) best_tags = [best_last_tag.item()] # We trace back where the best last tag comes from, append that to our best tag # sequence, and trace it back again, and so on for hist in reversed(history[:seq_ends[idx]]): best_last_tag = hist[idx][best_tags[-1]] best_tags.append(best_last_tag.item()) # Reverse the order because we start from the last timestep best_tags.reverse() best_tags_list.append(best_tags) return best_tags_list
14,331
43.098462
95
py
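`CRF` above defaults to `batch_first=False`, so emissions arrive as (seq_length, batch_size, num_tags). A minimal training-style call; a boolean mask is used here, which recent PyTorch versions require for the internal `torch.where` calls:

import torch
from model.torchcrf import CRF

num_tags = 5
crf = CRF(num_tags)                          # batch_first=False by default
emissions = torch.randn(11, 2, num_tags)     # (seq_len, batch, num_tags)
tags = torch.randint(0, num_tags, (11, 2))
mask = torch.ones(11, 2, dtype=torch.bool)   # all timesteps valid

loss = -crf(emissions, tags, mask)           # negative log likelihood (summed)
best = crf.decode(emissions, mask)           # Viterbi paths, one list per sequence
print(loss.item(), len(best), len(best[0]))  # scalar, 2, 11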
CaBERT-SLU
CaBERT-SLU-main/model/bert_model_context.py
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np from transformers import BertTokenizer, BertModel from model.transformer import TransformerModel from model.transformer_new import Transformer from model.CHAN import ContextAttention from model.torchcrf import CRF from model.mia import MutualIterativeAttention class BertContextNLU(nn.Module): def __init__(self, config, opt, num_labels=2, num_slot_labels=144): super(BertContextNLU, self).__init__() self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') self.num_labels = num_labels self.num_slot_labels = num_slot_labels self.bert = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True, output_attentions=True) self.dropout = nn.Dropout(0.1) self.hidden_size = config.hidden_size self.rnn_hidden = opt.rnn_hidden ######################################### # transformer self.transformer_model = TransformerModel(ninp=self.hidden_size, nhead=4, nhid=64, nlayers=2, dropout=0.1) self.transformer_encoder = Transformer(hidden_dim=self.hidden_size, model_dim=256, num_heads=2, dropout=0.1) # DiSAN self.conv1 = nn.Conv1d(self.hidden_size, self.hidden_size, 3, padding=1) self.conv2 = nn.Conv1d(self.hidden_size, self.hidden_size, 3, padding=1) self.fc1 = nn.Linear(2*self.hidden_size, self.rnn_hidden) # CHAN self.context_encoder = ContextAttention(self.device) # rnn self.rnn = nn.LSTM(input_size=self.hidden_size, hidden_size=self.rnn_hidden, batch_first=True, num_layers=1) # classifier self.classifier_rnn = nn.Linear(self.rnn_hidden, num_labels) nn.init.xavier_normal_(self.classifier_rnn.weight) self.classifier_bert = nn.Linear(self.hidden_size, num_labels) nn.init.xavier_normal_(self.classifier_bert.weight) self.classifier_transformer = nn.Linear(self.rnn_hidden*4, num_labels) nn.init.xavier_normal_(self.classifier_transformer.weight) # label embedding self.clusters = nn.Parameter(torch.randn(num_labels, config.hidden_size).float(), requires_grad=True) self.mapping = nn.Linear(config.hidden_size, self.rnn_hidden) # slot prediction self.slot_rnn = nn.LSTM(input_size=self.hidden_size+self.rnn_hidden, hidden_size=self.rnn_hidden, batch_first=True, bidirectional=True, num_layers=1) self.slot_classifier = nn.Linear(2*self.rnn_hidden, num_slot_labels) self.crf = CRF(self.num_slot_labels) # mutual iterative attention self.mia_encoder = MutualIterativeAttention(self.device) # self attentive self.linear1 = nn.Linear(config.hidden_size, 256) self.linear2 = nn.Linear(4*256, config.hidden_size) self.tanh = nn.Tanh() self.context_vector = nn.Parameter(torch.randn(256, 4), requires_grad=True) def self_attentive(self, last_hidden_states, d, b): # input should be (b,d,h) vectors = self.context_vector.unsqueeze(0).repeat(b*d, 1, 1) h = self.linear1(last_hidden_states) # (b*d, t, h) scores = torch.bmm(h, vectors) # (b*d, t, 4) scores = nn.Softmax(dim=1)(scores) # (b*d, t, 4) outputs = torch.bmm(scores.permute(0, 2, 1), h).view(b*d, -1) # (b*d, 4h) pooled_output = self.linear2(outputs) # (b*d, h) pooled_output = pooled_output.view(b,d,self.hidden_size) # (b,d,h) return pooled_output def mha(self, pooled_output, d, b): # input should be (d,b,h) pooled_output = pooled_output.view(d,b,self.hidden_size) # src_mask = self.transformer_model.generate_square_subsequent_mask(d).to(self.device) pooled_output = self.transformer_model(pooled_output, src_mask=None) pooled_output = pooled_output.view(b,d,self.hidden_size) return pooled_output def label_embed(self, y_caps, 
y_masks, rnn_out, d, b): last_hidden, clusters, hidden, att = self.bert(y_caps, attention_mask=y_masks) # clusters = self.mapping(clusters) # (n, 256) gram = torch.mm(clusters, clusters.permute(1,0)) # (n, n) rnn_out = rnn_out.reshape(b*d, self.hidden_size) # (b*d, 768) weights = torch.mm(rnn_out, clusters.permute(1,0)) # (b*d, n) logits = torch.mm(weights, torch.inverse(gram)) logits = logits.view(b,d,self.num_labels) return logits def DiSAN(self, pooled_output, d, b): # input should be (b,h,d) pooled_score = pooled_output.view(b,self.hidden_size,d) pooled_score = torch.sigmoid(self.conv1(pooled_score)) pooled_score = self.conv2(pooled_score) pooled_score = F.softmax(pooled_score, dim=-1) pooled_score = pooled_score.view(b,d,self.hidden_size) pooled_output = pooled_score * pooled_output return pooled_output def forward(self, result_ids, result_token_masks, result_masks, lengths, result_slot_labels, labels, y_caps, y_masks): """ Inputs: result_ids: (b, d, t) result_token_masks: (b, d, t) result_masks: (b, d) lengths: (b) result_slot_labels: (b, d, t) labels: (b, d, l) BERT outputs: last_hidden_states: (bxd, t, h) pooled_output: (bxd, h), from output of a linear classifier + tanh hidden_states: 13 x (bxd, t, h), embed to last layer embedding attentions: 12 x (bxd, num_heads, t, t) """ # BERT encoding b,d,t = result_ids.shape result_ids = result_ids.view(-1, t) result_token_masks = result_token_masks.view(-1, t) last_hidden_states, pooled_output, hidden_states, attentions = self.bert(result_ids, attention_mask=result_token_masks) pooled_output = pooled_output.view(b,d,self.hidden_size) ## Token: Self-attentive pooled_output = self.self_attentive(last_hidden_states, d, b) # (b,d,l) # logits = self.classifier_bert(pooled_output) ## Turn: MHA # pooled_output = self.mha(pooled_output, d, b) # (b,d,l) ## Turn: DiSAN # context_vector = self.DiSAN(pooled_output, d, b) # final_hidden = torch.cat([pooled_output, context_vector], dim=-1) # final_hidden = self.fc1(final_hidden) # logits = self.classifier_rnn(final_hidden) ## Turn: CHAN pooled_output, ffscores = self.context_encoder(pooled_output, result_masks) # logits = self.classifier_bert(pooled_output) # (b,d,l) ## Turn: transformer # transformer_out, attention = self.transformer_encoder(pooled_output, pooled_output, pooled_output, result_masks) # transformer_out = self.dropout(transformer_out) # logits = self.classifier_transformer(transformer_out) # (b,d,l) ## Prediction: RNN rnn_out, _ = self.rnn(pooled_output) rnn_out = self.dropout(rnn_out) logits = self.classifier_rnn(rnn_out) # (b,d,l) ## Prediction: Label Embedding # logits = self.label_embed(y_caps, y_masks, pooled_output, d, b) # Remove padding logits_no_pad = [] labels_no_pad = [] for i in range(b): logits_no_pad.append(logits[i,:lengths[i],:]) labels_no_pad.append(labels[i,:lengths[i],:]) logits = torch.cat(logits_no_pad, dim=0) labels = torch.cat(labels_no_pad, dim=0) ####### # slot prediction slot_vectors = last_hidden_states # (b*d,t,h) intent_context = rnn_out.unsqueeze(2).repeat(1,1,t,1).reshape(-1,t,self.rnn_hidden) # (b*d,t,hr) # comia # intent_context = pooled_output.unsqueeze(2) # slot_refined = self.mia_encoder(intent_context, slot_vectors) slot_inputs = torch.cat([slot_vectors, intent_context], dim=-1) # (b*d,t,h+hr) slot_rnn_out, _ = self.slot_rnn(slot_inputs) slot_rnn_out = self.dropout(slot_rnn_out) slot_out = self.slot_classifier(slot_rnn_out) slot_out = slot_out.view(-1, self.num_slot_labels) # (b*d*t, num_slots) # slot_loss = -self.crf(slot_out, result_slot_labels) 
        return logits, labels, slot_out  # append ffscores here to inspect CHAN attention
8,750
41.072115
127
py
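Several pooling strategies are wired into `BertContextNLU` above; the active one is the self-attentive pooling, which compresses each utterance's token states into a single vector using four learned context heads. A self-contained sketch of that pooling (the dimensions are illustrative, not pinned to the model):

import torch
import torch.nn as nn

class SelfAttentivePooling(nn.Module):
    """Pool (batch, tokens, hidden) down to (batch, hidden) with 4 heads."""
    def __init__(self, hidden=768, proj=256, heads=4):
        super().__init__()
        self.linear1 = nn.Linear(hidden, proj)
        self.linear2 = nn.Linear(heads * proj, hidden)
        self.context = nn.Parameter(torch.randn(proj, heads))

    def forward(self, token_states):
        h = self.linear1(token_states)                    # (b, t, proj)
        scores = torch.softmax(h @ self.context, dim=1)   # (b, t, heads), over tokens
        mixed = torch.bmm(scores.transpose(1, 2), h)      # (b, heads, proj)
        return self.linear2(mixed.flatten(start_dim=1))   # (b, hidden)

pool = SelfAttentivePooling()
print(pool(torch.randn(8, 16, 768)).shape)  # torch.Size([8, 768])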
conversational-datasets
conversational-datasets-master/tools/tfrutil.py
# -*- coding: utf-8 -*- """Command line utilities for manipulating tfrecords files. Usage: To count the number of examples in a tfrecord file: python tfrutil.py size train-00999-of-01000.tfrecords To sample 10000 examples from a file pattern to an output file: python tfrutil.py sample 10000 train-*-of-01000.tfrecords \ train-sampled.tfrecords To pretty print the contents of a tfrecord file: python tfrutil.py pp train-00999-of-01000.tfrecords This can accept gs:// file paths, as well as local files. """ import codecs import random import sys import click import six import tensorflow as tf @click.group() def _cli(): """Command line utilities for manipulating tfrecords files.""" pass @_cli.command(name="size") @click.argument("path", type=str, required=True, nargs=1) def _size(path): """Compute the number of examples in the input tfrecord file.""" i = 0 for _ in tf.python_io.tf_record_iterator(path): i += 1 print(i) @_cli.command(name="sample") @click.argument("sample_size", type=int, required=True, nargs=1) @click.argument("file_patterns", type=str, required=True, nargs=-1) @click.argument("out", type=str, required=True, nargs=1) def _sample(sample_size, file_patterns, out): file_paths = [] for file_pattern in file_patterns: file_paths += tf.gfile.Glob(file_pattern) random.shuffle(file_paths) # Try to read twice as many examples as requested from the files, reading # the files in a random order. buffer_size = int(2 * sample_size) examples = [] for file_name in file_paths: for example in tf.python_io.tf_record_iterator(file_name): examples.append(example) if len(examples) == buffer_size: break if len(examples) == buffer_size: break if len(examples) < sample_size: tf.logging.warning( "Not enough examples to sample from. Found %i but requested %i.", len(examples), sample_size, ) sampled_examples = examples else: sampled_examples = random.sample(examples, sample_size) with tf.python_io.TFRecordWriter(out) as record_writer: for example in sampled_examples: record_writer.write(example) print("Wrote %i examples to %s." 
% (len(sampled_examples), out)) @_cli.command(name="pp") @click.argument("path", type=str, required=True, nargs=1) def _pretty_print(path): """Format and print the contents of the tfrecord file to stdout.""" for i, record in enumerate(tf.python_io.tf_record_iterator(path)): example = tf.train.Example() example.ParseFromString(record) print("Example %i\n--------" % i) _pretty_print_example(example) print("--------\n\n") def _pretty_print_example(example): """Format and print an individual tensorflow example.""" _print_field("Context", _get_string_feature(example, "context")) _print_field("Response", _get_string_feature(example, "response")) _print_extra_contexts(example) _print_other_features(example) def _print_field(name, content, indent=False): indent_str = "\t" if indent else "" content = content.replace("\n", "\\n ") print("%s[%s]:" % (indent_str, name)) print("%s\t%s" % (indent_str, content)) def _get_string_feature(example, feature_name): return example.features.feature[feature_name].bytes_list.value[0].decode( "utf-8") def _print_extra_contexts(example): """Print the extra context features.""" extra_contexts = [] i = 0 while True: feature_name = "context/{}".format(i) try: value = _get_string_feature(example, feature_name) except IndexError: break extra_contexts.append((feature_name, value)) i += 1 if not extra_contexts: return print("\nExtra Contexts:") for feature_name, value in reversed(extra_contexts): _print_field(feature_name, value, indent=True) def _print_other_features(example): """Print the other features, which will depend on the dataset. For now, only support string features. """ printed_header = False for feature_name, value in sorted(example.features.feature.items()): if (feature_name in {"context", "response"} or feature_name.startswith("context/")): continue if not printed_header: # Only print the header if there are other features in this # example. print("\nOther features:") printed_header = True _print_field( feature_name, value.bytes_list.value[0].decode("utf-8"), indent=True) if __name__ == "__main__": if six.PY2: sys.stdout = codecs.getwriter("utf8")(sys.stdout) _cli()
4,832
28.469512
77
py
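The `size` and `pp` commands above both walk the file with `tf.python_io.tf_record_iterator` and parse records into `tf.train.Example` protos. A programmatic sketch of the same loop, assuming TF 1.x as the script itself does:

import tensorflow as tf

def count_and_peek(path, peek=1):
    """Count records in a tfrecords file and print the first few contexts."""
    count = 0
    for record in tf.python_io.tf_record_iterator(path):
        if count < peek:
            example = tf.train.Example()
            example.ParseFromString(record)
            feature = example.features.feature["context"]
            print(feature.bytes_list.value[0].decode("utf-8"))
        count += 1
    return count

# count_and_peek("train-00999-of-01000.tfrecords")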
conversational-datasets
conversational-datasets-master/tools/__init__.py
0
0
0
py
conversational-datasets
conversational-datasets-master/reddit/create_data.py
"""A Dataflow script for creating datasets from reddit. For usage see README.md. """ import argparse import hashlib import json import logging import os import re import uuid from collections import defaultdict, namedtuple from functools import partial import apache_beam as beam import tensorflow as tf from apache_beam import pvalue from apache_beam.io import BigQuerySource, Read from apache_beam.io.textio import WriteToText from apache_beam.io.tfrecordio import WriteToTFRecord from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions _TF_FORMAT = "TF" _JSON_FORMAT = "JSON" def _parse_args(argv=None): """Parse command line arguments.""" def _positive_int(value): """Define a positive integer ArgumentParser type.""" value = int(value) if value <= 0: raise argparse.ArgumentTypeError( "Value must be positive, {} was passed.".format(value)) return value parser = argparse.ArgumentParser() parser.add_argument( "--reddit_table", required=True, help="The BigQuery table to read comments from, in " "project:table format.", ) parser.add_argument( "--output_dir", required=True, help="Google cloud storage output directory to write the dataset.", ) parser.add_argument( "--dataset_format", choices={_TF_FORMAT, _JSON_FORMAT}, default="TF", help="The dataset format to write. 'TF' for serialized tensorflow " "examples in TFRecords. 'JSON' for text files with one JSON " "object per line." ) parser.add_argument( "--parent_depth", type=_positive_int, default=10, help="How many parent comments to consider.", ) parser.add_argument( "--max_length", type=_positive_int, default=127, help="Maximum length of comments to include.", ) parser.add_argument( "--min_length", type=_positive_int, default=9, help="Minimum length of comments to include.", ) parser.add_argument( "--train_split", default=0.9, type=float, help="The proportion of data to put in the training set.", ) parser.add_argument( "--num_shards_test", default=100, type=_positive_int, help="The number of shards for the test set.", ) parser.add_argument( "--num_shards_train", default=1000, type=_positive_int, help="The number of shards for the train set.", ) return parser.parse_known_args(argv) # Represent a reddit comment. Comment = namedtuple( "Comment", [ "id", "thread_id", "parent_id", "body", "body_is_trimmed", "author", "subreddit", ] ) def normalise_comment(comment, max_length): """Create a _Comment object from a row in the BigQuery table.""" return Comment( id=comment['id'], thread_id=_normalise_id(comment['link_id']), parent_id=_normalise_id(comment['parent_id']), body=trim(comment['body'], max_length), body_is_trimmed=len(comment['body']) > max_length, author=comment['author'], subreddit=comment['subreddit'], ) def _normalise_id(raw_id): """Reddit IDs start with t1_, t2_, etc. which need to be stripped.""" return re.sub("^t[0-9]_", "", raw_id) def trim(text, max_length): """Trims text to be at most `max_length`, without splitting apart words.""" if len(text) <= max_length: return text text = text[:max_length + 1] # Trim until the last two characters are the boundary between an # alphanumeric character, and a non-alphanumeric character. 
while len(text) > 1 and (text[-1].isalnum() == text[-2].isalnum()): text = text[:-1] return text[:-1] def _should_skip(comment, min_length): if comment.body_is_trimmed: return True if comment.body in {"[deleted]", "[removed]"}: return True if len(comment.body) < min_length: return True return False def create_examples(thread, parent_depth, min_length, format): """Creates serialized tensorflow examples from a reddit thread.""" id_to_comment = {comment.id: comment for comment in list(thread)} for linear_path in linear_paths(id_to_comment, parent_depth): response = id_to_comment[linear_path[-1]] context = id_to_comment[linear_path[-2]] # guaranteed to exist. if (_should_skip(response, min_length) or _should_skip(context, min_length)): continue example = {} example['subreddit'] = response.subreddit example['thread_id'] = response.thread_id example['context_author'] = context.author example['response_author'] = response.author example['context'] = context.body example['response'] = response.body for i in range(parent_depth - 1): # Extra contexts start at index -3. index = -3 - i try: context_i = linear_path[index] except IndexError: break example['context/{}'.format(i)] = id_to_comment[context_i].body yield example def _features_to_serialized_tf_example(features): """Convert a string dict to a serialized TF example. The dictionary maps feature names (strings) to feature values (strings). """ example = tf.train.Example() for feature_name, feature_value in features.items(): example.features.feature[feature_name].bytes_list.value.append( feature_value.encode("utf-8")) return example.SerializeToString() def linear_paths(id_to_comment, parent_depth): """Gets all linear paths of comments and replies from the thread. Each linear path is guaranteed to have at least two comments in it. """ paths = [] seen_ids = set() id_to_children = defaultdict(list) for comment_id, comment in id_to_comment.items(): id_to_children[comment.parent_id].append(comment_id) if comment.parent_id not in id_to_comment: paths.append([comment_id]) seen_ids.add(comment_id) while paths: new_paths = [] for path in paths: last_id = path[-1] for child_id in id_to_children[last_id]: if child_id in seen_ids: # Prevent infinite loops. continue seen_ids.add(child_id) new_path = path[-parent_depth:] + [child_id] new_paths.append(new_path) yield new_path paths = new_paths def _shuffle(pcollection): """Shuffles the input pcollection.""" pcollection |= "add random key" >> beam.Map( lambda value: (uuid.uuid4(), value)) pcollection |= "group by key" >> beam.GroupByKey() pcollection |= "get shuffled values" >> beam.FlatMap(lambda t: t[1]) return pcollection class _TrainTestSplitFn(beam.DoFn): """Splits an input PCollection of examples into train and test. This uses the thread id to compute the split, so that examples from the same thread are in the same set. The split is deterministic based on thread id, so that multiple runs produce the same result. 
""" TRAIN_TAG = "train" TEST_TAG = "test" def __init__(self, train_split, num_buckets=4096): super(_TrainTestSplitFn, self).__init__() self._train_split = train_split self._num_buckets = num_buckets def process(self, example): split_value = self._split_value(example['thread_id']) split = ( self.TRAIN_TAG if split_value < self._train_split else self.TEST_TAG) yield pvalue.TaggedOutput(split, example) def _split_value(self, thread_id): """Compute a value from 0 to 1 used to compute the split.""" md5 = hashlib.md5() md5.update(thread_id) md5_digest = int(md5.hexdigest(), 16) return ( (1 + md5_digest % self._num_buckets) / float(self._num_buckets) ) def run(argv=None, comments=None): """Run the beam pipeline. Args: argv: (optional) the command line flags to parse. comments_collection: (optional) a list of comment JSON objects to process. Used in unit-tests to avoid requiring a BigQuery source. """ args, pipeline_args = _parse_args(argv) pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = True p = beam.Pipeline(options=pipeline_options) if comments is not None: comments = p | ("Read in-memory comments") >> beam.Create(comments) else: comments = p | ("Read " + args.reddit_table) >> Read( BigQuerySource(args.reddit_table)) comments |= ( "Normalise comments" >> beam.Map( partial(normalise_comment, max_length=args.max_length))) thread_id_to_comments = comments | ( "Key by thread id" >> beam.Map( lambda comment: (comment.thread_id, comment))) threads = thread_id_to_comments | ( "Group comments by thread ID" >> beam.GroupByKey()) threads = threads | ("Get threads" >> beam.Map(lambda t: t[1])) examples = threads | ( "Create {} examples".format(args.dataset_format) >> beam.FlatMap( partial(create_examples, parent_depth=args.parent_depth, min_length=args.min_length, format=args.dataset_format, ))) examples = _shuffle(examples) examples |= "split train and test" >> beam.ParDo( _TrainTestSplitFn(train_split=args.train_split) ).with_outputs(_TrainTestSplitFn.TEST_TAG, _TrainTestSplitFn.TRAIN_TAG) if args.dataset_format == _JSON_FORMAT: write_sink = WriteToText file_name_suffix = ".json" serialize_fn = json.dumps else: assert args.dataset_format == _TF_FORMAT write_sink = WriteToTFRecord file_name_suffix = ".tfrecord" serialize_fn = _features_to_serialized_tf_example for name, tag in [("train", _TrainTestSplitFn.TRAIN_TAG), ("test", _TrainTestSplitFn.TEST_TAG)]: serialized_examples = examples[tag] | ( "serialize {} examples".format(name) >> beam.Map(serialize_fn)) ( serialized_examples | ("write " + name) >> write_sink( os.path.join(args.output_dir, name), file_name_suffix=file_name_suffix, num_shards=args.num_shards_train, ) ) result = p.run() result.wait_until_finish() if __name__ == "__main__": logging.getLogger().setLevel(logging.INFO) run()
10,906
30.341954
79
py
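The deterministic split in `_TrainTestSplitFn` above deserves a worked illustration: the thread id is hashed into one of 4096 buckets, so whole threads land in the same split and repeated runs agree. A minimal sketch of the same computation (the key is encoded explicitly so the snippet also runs on Python 3; the thread id is made up):

import hashlib

def split_value(key, num_buckets=4096):
    # Deterministic value in (0, 1] derived from the key's MD5 digest,
    # mirroring _TrainTestSplitFn._split_value above.
    digest = int(hashlib.md5(key.encode("utf-8")).hexdigest(), 16)
    return (1 + digest % num_buckets) / float(num_buckets)

assert split_value("thread-A") == split_value("thread-A")  # stable across runs
is_train = split_value("thread-A") < 0.9  # compare against --train_split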
conversational-datasets
conversational-datasets-master/reddit/__init__.py
0
0
0
py
conversational-datasets
conversational-datasets-master/reddit/create_data_test.py
"""Tests for create_data.py.""" import copy import json import shutil import tempfile import unittest from glob import glob from os import path import tensorflow as tf from reddit import create_data class CreateDataPipelineTest(unittest.TestCase): """Test running the pipeline end-to-end.""" def setUp(self): self._temp_dir = tempfile.mkdtemp() self.maxDiff = None def tearDown(self): shutil.rmtree(self._temp_dir) def test_run(self): with open("reddit/testdata/simple_thread.json") as f: comments = json.loads(f.read()) # Duplicate the thread with a different ID, chosing a link_id that # will be put in the test set. test_comments = [] for comment in comments: test_comment = copy.copy(comment) test_comment['link_id'] = "t3_testthread" test_comments.append(test_comment) create_data.run( argv=[ "--runner=DirectRunner", "--reddit_table=ignored", "--output_dir=" + self._temp_dir, "--dataset_format=TF", "--num_shards_test=2", "--num_shards_train=2", "--min_length=4", "--max_length=5", "--train_split=0.5", ], comments=(comments + test_comments) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["train-00000-of-00002.tfrecord", "train-00001-of-00002.tfrecord"]], glob(path.join(self._temp_dir, "train-*")) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["test-00000-of-00002.tfrecord", "test-00001-of-00002.tfrecord"]], glob(path.join(self._temp_dir, "test-*")) ) train_examples = self._read_examples("train-*") expected_train_examples = [ self._create_example( { 'context': "AAAA", 'context_author': "author-A", 'response': "BBBB", 'response_author': "author-B", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', } ), self._create_example( { 'context/0': "AAAA", 'context': "BBBB", 'context_author': "author-B", 'response': "CCCC", 'response_author': "author-C", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', } ), self._create_example( { 'context/0': "AAAA", 'context': "BBBB", 'context_author': "author-B", 'response': "DDDD", 'response_author': "author-D", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', } ), self._create_example( { 'context/1': "AAAA", 'context/0': "BBBB", 'context': "DDDD", 'context_author': "author-D", 'response': "EEEE", 'response_author': "author-E", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', } ), ] self.assertItemsEqual(expected_train_examples, train_examples) expected_test_examples = [] for example in expected_train_examples: example.features.feature['thread_id'].bytes_list.value[0] = ( "testthread").encode("utf-8") expected_test_examples.append(example) test_examples = self._read_examples("test-*") self.assertItemsEqual(expected_test_examples, test_examples) def test_run_json(self): with open("reddit/testdata/simple_thread.json") as f: comments = json.loads(f.read()) # Duplicate the thread with a different ID, chosing a link_id that # will be put in the test set. 
test_comments = [] for comment in comments: test_comment = copy.copy(comment) test_comment['link_id'] = "t3_testthread" test_comments.append(test_comment) create_data.run( argv=[ "--runner=DirectRunner", "--reddit_table=ignored", "--output_dir=" + self._temp_dir, "--dataset_format=JSON", "--num_shards_test=2", "--num_shards_train=2", "--min_length=4", "--max_length=5", "--train_split=0.5", ], comments=(comments + test_comments) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["train-00000-of-00002.json", "train-00001-of-00002.json"]], glob(path.join(self._temp_dir, "train-*")) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["test-00000-of-00002.json", "test-00001-of-00002.json"]], glob(path.join(self._temp_dir, "test-*")) ) train_examples = self._read_json_examples("train-*") expected_train_examples = [ { 'context': "AAAA", 'context_author': "author-A", 'response': "BBBB", 'response_author': "author-B", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', }, { 'context/0': "AAAA", 'context': "BBBB", 'context_author': "author-B", 'response': "CCCC", 'response_author': "author-C", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', }, { 'context/0': "AAAA", 'context': "BBBB", 'context_author': "author-B", 'response': "DDDD", 'response_author': "author-D", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', }, { 'context/1': "AAAA", 'context/0': "BBBB", 'context': "DDDD", 'context_author': "author-D", 'response': "EEEE", 'response_author': "author-E", 'subreddit': "subreddit-A", 'thread_id': 'thread-A', } ] self.assertItemsEqual(expected_train_examples, train_examples) expected_test_examples = [] for example in expected_train_examples: example['thread_id'] = "testthread" expected_test_examples.append(example) test_examples = self._read_json_examples("test-*") self.assertItemsEqual(expected_test_examples, test_examples) def _read_examples(self, pattern): examples = [] for file_name in glob(path.join(self._temp_dir, pattern)): for record in tf.io.tf_record_iterator(file_name): example = tf.train.Example() example.ParseFromString(record) examples.append(example) return examples def _read_json_examples(self, pattern): examples = [] for file_name in glob(path.join(self._temp_dir, pattern)): for line in open(file_name): examples.append(json.loads(line)) return examples @staticmethod def _create_example(features): example = tf.train.Example() for feature_name, feature_value in features.items(): example.features.feature[feature_name].bytes_list.value.append( feature_value.encode("utf-8")) return example class CreateDataTest(unittest.TestCase): """Test individual helper functions.""" def test_trim(self): self.assertEqual( "Matthew", create_data.trim("Matthew Henderson", 7) ) def test_trim_do_not_split_word(self): self.assertEqual( "Matthew ", create_data.trim("Matthew Henderson", 9) ) def test_trim_string_short(self): self.assertEqual( "Matthew", create_data.trim("Matthew", 9) ) def test_trim_long_word(self): self.assertEqual( "", create_data.trim("Matthew", 2) ) def test_normalise_comment(self): comment = create_data.normalise_comment( { 'body': "ABC EFG HIJ KLM NOP", 'score_hidden': None, 'archived': None, 'name': None, 'author_flair_text': None, 'downs': None, 'created_utc': "1520704245", 'subreddit_id': "t5_AAAAA", 'link_id': "t3_BBBBB", 'parent_id': "t1_CCCCC", 'score': "1", 'retrieved_on': "1525020075", 'controversiality': "0", 'gilded': "0", 'id': "DDDDD", 'subreddit': "EEEEE", 'author': "FFFFF", 'ups': None, 
'distinguished': None, 'author_flair_css_class': None, }, max_length=16) self.assertEqual( comment, create_data.Comment( body="ABC EFG HIJ KLM ", thread_id="BBBBB", parent_id="CCCCC", id="DDDDD", body_is_trimmed=True, subreddit="EEEEE", author="FFFFF", ) ) def test_linear_paths(self): with open("reddit/testdata/thread.json") as f: comments = json.loads(f.read()) comments = [ create_data.normalise_comment(comment, max_length=127) for comment in comments] id_to_comment = {comment.id: comment for comment in comments} paths = list(create_data.linear_paths(id_to_comment, parent_depth=100)) self.assertItemsEqual( [["dvedzte", "dvfdfd4"], ["dvedzte", "dveh7r5"], ["dve3v95", "dvhjrkc"], ["dve3v95", "dvhjrkc", "dvhktmd"], ["dve3v95", "dvhjrkc", "dvhktmd", "dvhn7hh"], ["dve3v95", "dvhjrkc", "dvhktmd", "dvhn7hh", "dvhvg4m"]], paths ) @staticmethod def _create_test_comment(id, parent_id): return create_data.Comment( body="body", thread_id="thread_id", parent_id=parent_id, id=id, body_is_trimmed=True, subreddit="subreddit", author="author", ) def test_linear_paths_with_self_loop(self): id_to_comment = { "1": self._create_test_comment(id="1", parent_id="1"), } paths = list(create_data.linear_paths(id_to_comment, parent_depth=100)) self.assertEqual([], paths) def test_linear_paths_with_loop(self): id_to_comment = { "1": self._create_test_comment(id="1", parent_id="3"), "2": self._create_test_comment(id="2", parent_id="1"), "3": self._create_test_comment(id="3", parent_id="2"), } paths = list(create_data.linear_paths(id_to_comment, parent_depth=100)) self.assertEqual([], paths) def test_linear_paths_with_stranded_threads(self): """Check that it picks up threads whose parents are missing.""" id_to_comment = { "1": self._create_test_comment(id="1", parent_id="unseen"), "2": self._create_test_comment(id="2", parent_id="1"), "3": self._create_test_comment(id="3", parent_id="unseen 2"), "4": self._create_test_comment(id="4", parent_id="3"), } paths = list(create_data.linear_paths(id_to_comment, parent_depth=100)) self.assertItemsEqual([ ["1", "2"], ["3", "4"], ], paths) def test_long_thread(self): """Check there is no issue with long threads (e.g. recursion limits)""" id_to_comment = { i: self._create_test_comment(id=i, parent_id=i - 1) for i in range(2000) } paths = list(create_data.linear_paths(id_to_comment, parent_depth=10)) self.assertItemsEqual([ range(max(i - 11, 0), i) for i in range(2, 2001) ], paths) if __name__ == "__main__": unittest.main()
12,907
33.148148
79
py
conversational-datasets
conversational-datasets-master/amazon_qa/create_data.py
"""A Dataflow script for creating Amazon question/answer data. For usage see README.md. """ import argparse import ast import hashlib import json import logging import os import uuid from functools import partial import apache_beam as beam import tensorflow as tf from apache_beam import pvalue from apache_beam.io.textio import ReadFromText, WriteToText from apache_beam.io.tfrecordio import WriteToTFRecord from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions _TF_FORMAT = "TF" _JSON_FORMAT = "JSON" def _parse_args(argv=None): """Parse command-line args.""" def _positive_int(value): """Define a positive integer ArgumentParser type.""" value = int(value) if value <= 0: raise argparse.ArgumentTypeError( "Value must be positive, {} was passed.".format(value)) return value parser = argparse.ArgumentParser() parser.add_argument( "--file_pattern", required=True, help="File pattern for amazon qa files on Google cloud storage.", ) parser.add_argument( "--output_dir", required=True, help="Output directory to write the dataset on Google cloud storage.", ) parser.add_argument( "--dataset_format", choices={_TF_FORMAT, _JSON_FORMAT}, default="TF", help="The dataset format to write. 'TF' for serialized tensorflow " "examples in TFRecords. 'JSON' for text files with one JSON " "object per line." ) parser.add_argument( "--max_words", type=_positive_int, default=59, help="Maximum number of words a Q or A can have to be included.", ) parser.add_argument( "--min_words", type=_positive_int, default=4, help="Minimum number of words a Q or A must have to be included.", ) parser.add_argument( "--train_split", default=0.9, type=float, help="The proportion of data to put in the training set.", ) parser.add_argument( "--num_shards_test", default=10, type=_positive_int, help="The number of shards for the test set.", ) parser.add_argument( "--num_shards_train", default=100, type=_positive_int, help="The number of shards for the train set.", ) return parser.parse_known_args(argv) def _create_tuples(qa_object, min_words, max_words): """Creates (product_id, question, answer) tuples.""" if "question" in qa_object: question = qa_object['question'] answer = qa_object['answer'] product_id = qa_object['asin'] if (_should_skip(question, min_words, max_words) or _should_skip(answer, min_words, max_words)): return yield (product_id, question, answer) elif "questions" in qa_object: product_id = qa_object['asin'] for question_obj in qa_object['questions']: question = question_obj['questionText'] if _should_skip(question, min_words, max_words): continue for answer_obj in question_obj['answers']: answer = answer_obj['answerText'] if _should_skip(answer, min_words, max_words): continue yield (product_id, question, answer) def _should_skip(text, min_words, max_words): # Estimate the number of words by splitting on spaces. num_words = len(text.split(" ")) return num_words < min_words or num_words > max_words def _create_example(product_id, question, answer): """Create an example dictionary.""" return { 'product_id': product_id, 'context': question, 'response': answer, } def _shuffle_examples(examples): examples |= "add random key" >> beam.Map( lambda example: (uuid.uuid4(), example) ) examples |= "group by key" >> beam.GroupByKey() examples |= "get shuffled values" >> beam.FlatMap(lambda t: t[1]) return examples def _features_to_serialized_tf_example(features): """Convert a string dict to a serialized TF example. The dictionary maps feature names (strings) to feature values (strings). 
""" example = tf.train.Example() for feature_name, feature_value in features.items(): example.features.feature[feature_name].bytes_list.value.append( feature_value.encode("utf-8")) return example.SerializeToString() class _TrainTestSplitFn(beam.DoFn): """Splits an input PCollection of examples into train and test. This uses the product id to compute the split, so that examples from the same product are in the same set. The split is deterministic based on prodict id, so that multiple runs produce the same result.""" TRAIN_TAG = "train" TEST_TAG = "test" def __init__(self, train_split=0.9, num_buckets=4096): super(_TrainTestSplitFn, self).__init__() self._train_split = train_split self._num_buckets = num_buckets def process(self, example): split_value = self._split_value(example['product_id']) split = ( self.TRAIN_TAG if split_value < self._train_split else self.TEST_TAG) yield pvalue.TaggedOutput(split, example) def _split_value(self, product_id): """Compute a value from 0 to 1 used to compute the split.""" md5 = hashlib.md5() md5.update(product_id) md5_digest = int(md5.hexdigest(), 16) return ( (1 + md5_digest % self._num_buckets) / float(self._num_buckets) ) def run(argv=None): """Run the beam pipeline.""" args, pipeline_args = _parse_args(argv) pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = True p = beam.Pipeline(options=pipeline_options) lines = p | "read qa files" >> ReadFromText(args.file_pattern) # The lines are not JSON, but the string representation of python # dictionary objects. Parse them with ast.literal_eval. json_objects = lines | "parsing dictionaries" >> beam.Map(ast.literal_eval) qa_tuples = json_objects | "create tuples" >> beam.FlatMap( partial( _create_tuples, min_words=args.min_words, max_words=args.max_words) ) # Remove duplicate examples. qa_tuples |= "key by QA" >> beam.Map(lambda v: (v[1:], v)) qa_tuples |= "group duplicates" >> beam.GroupByKey() qa_tuples |= "remove duplicates" >> beam.Map(lambda v: sorted(v[1])[0]) # Create the examples. examples = qa_tuples | "create examples" >> beam.Map( lambda args: _create_example(*args) ) examples = _shuffle_examples(examples) examples |= "split train and test" >> beam.ParDo( _TrainTestSplitFn(args.train_split) ).with_outputs(_TrainTestSplitFn.TEST_TAG, _TrainTestSplitFn.TRAIN_TAG) if args.dataset_format == _JSON_FORMAT: write_sink = WriteToText file_name_suffix = ".json" serialize_fn = json.dumps else: assert args.dataset_format == _TF_FORMAT write_sink = WriteToTFRecord file_name_suffix = ".tfrecord" serialize_fn = _features_to_serialized_tf_example for name, tag in [("train", _TrainTestSplitFn.TRAIN_TAG), ("test", _TrainTestSplitFn.TEST_TAG)]: serialized_examples = examples[tag] | ( "serialize {} examples".format(name) >> beam.Map(serialize_fn)) ( serialized_examples | ("write " + name) >> write_sink( os.path.join(args.output_dir, name), file_name_suffix=file_name_suffix, num_shards=args.num_shards_train, ) ) result = p.run() result.wait_until_finish() if __name__ == "__main__": logging.getLogger().setLevel(logging.INFO) run()
7,954
31.206478
79
py
conversational-datasets
conversational-datasets-master/amazon_qa/__init__.py
0
0
0
py
conversational-datasets
conversational-datasets-master/amazon_qa/create_data_test.py
"""Tests for create_data.py.""" import json import shutil import tempfile import unittest from glob import glob from os import path import tensorflow as tf from amazon_qa import create_data _TEST_DATA = [ { 'question': "A A A", 'answer': "B B B", 'asin': "3", # gets put in test set. }, { # Duplicate QA should not create duplicate examples. 'question': "A A A", 'answer': "B B B", 'asin': "4", }, { 'question': "A A A A A A", # too many words, will be skipped. 'answer': "B B B", 'asin': "4", }, { 'questions': [ { 'questionText': "C C C", 'answers': [ {'answerText': "D D D"}, {'answerText': "E E E"}, {'answerText': "E E"} # not enough words, will be skipped. ] }, { 'questionText': "F F F", 'answers': [ {'answerText': "G G G"}, ] }, ], 'asin': "1", # gets put in train set. }, { 'questions': [ { 'questionText': "H H H", 'answers': [ {'answerText': "I I I"}, ] }, ], 'asin': "2", # gets put in train set. }, ] class CreateDataPipelineTest(unittest.TestCase): def setUp(self): self._temp_dir = tempfile.mkdtemp() self.maxDiff = None def tearDown(self): shutil.rmtree(self._temp_dir) def test_run(self): # These filenames are chosen so that their hashes will cause them to # be put in the train and test set respectively. with open(path.join(self._temp_dir, "input-000"), "w") as f: for obj in _TEST_DATA: f.write(("%s\n" % obj).encode("utf-8")) create_data.run(argv=[ "--runner=DirectRunner", "--file_pattern={}/input*".format(self._temp_dir), "--output_dir=" + self._temp_dir, "--dataset_format=TF", "--num_shards_test=2", "--num_shards_train=2", "--min_words=3", "--max_words=5", "--train_split=0.5", ]) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["train-00000-of-00002.tfrecord", "train-00001-of-00002.tfrecord"]], glob(path.join(self._temp_dir, "train-*")) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["test-00000-of-00002.tfrecord", "test-00001-of-00002.tfrecord"]], glob(path.join(self._temp_dir, "test-*")) ) train_examples = self._read_examples("train-*") expected_train_examples = [ self.create_example("1", "C C C", "D D D"), self.create_example("1", "C C C", "E E E"), self.create_example("1", "F F F", "G G G"), self.create_example("2", "H H H", "I I I"), ] self.assertItemsEqual( expected_train_examples, train_examples ) test_examples = self._read_examples("test-*") expected_test_examples = [ self.create_example("3", "A A A", "B B B"), ] self.assertItemsEqual( expected_test_examples, test_examples ) def create_example(self, product_id, question, answer): example = tf.train.Example() example.features.feature['product_id'].bytes_list.value.append( product_id.encode("utf-8")) example.features.feature['context'].bytes_list.value.append( question.encode("utf-8")) example.features.feature['response'].bytes_list.value.append( answer.encode("utf-8")) return example def _read_examples(self, pattern): examples = [] for file_name in glob(path.join(self._temp_dir, pattern)): for record in tf.io.tf_record_iterator(file_name): example = tf.train.Example() example.ParseFromString(record) examples.append(example) return examples def test_run_json(self): # These filenames are chosen so that their hashes will cause them to # be put in the train and test set respectively. 
with open(path.join(self._temp_dir, "input-000"), "w") as f: for obj in _TEST_DATA: f.write(("%s\n" % obj).encode("utf-8")) create_data.run(argv=[ "--runner=DirectRunner", "--file_pattern={}/input*".format(self._temp_dir), "--output_dir=" + self._temp_dir, "--dataset_format=JSON", "--num_shards_test=2", "--num_shards_train=2", "--min_words=3", "--max_words=5", "--train_split=0.5", ]) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["train-00000-of-00002.json", "train-00001-of-00002.json"]], glob(path.join(self._temp_dir, "train-*")) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["test-00000-of-00002.json", "test-00001-of-00002.json"]], glob(path.join(self._temp_dir, "test-*")) ) train_examples = self._read_json_examples("train-*") expected_train_examples = [ self.create_json_example("1", "C C C", "D D D"), self.create_json_example("1", "C C C", "E E E"), self.create_json_example("1", "F F F", "G G G"), self.create_json_example("2", "H H H", "I I I"), ] self.assertItemsEqual( expected_train_examples, train_examples ) test_examples = self._read_json_examples("test-*") expected_test_examples = [ self.create_json_example("3", "A A A", "B B B"), ] self.assertItemsEqual( expected_test_examples, test_examples ) def create_json_example(self, product_id, question, answer): return { 'product_id': product_id, 'context': question, 'response': answer, } def _read_json_examples(self, pattern): examples = [] for file_name in glob(path.join(self._temp_dir, pattern)): for line in open(file_name): examples.append(json.loads(line)) return examples if __name__ == "__main__": unittest.main()
6,788
30.576744
79
py
conversational-datasets
conversational-datasets-master/baselines/run_baseline.py
"""Evaluate baseline models on conversational datasets. For usage see README.md. """ import argparse import csv import enum import random import glog import numpy as np import tensorflow as tf from tqdm import tqdm from baselines import keyword_based, vector_based def _parse_args(): """Parse command-line args.""" parser = argparse.ArgumentParser() parser.add_argument( "--method", type=Method.from_string, choices=list(Method), required=True, help="The baseline method to use.") parser.add_argument( "--recall_k", type=int, default=100, help="The value of k to compute recall at.") parser.add_argument( "--train_dataset", type=str, required=True, help="File pattern of train set.") parser.add_argument( "--train_size", type=int, default=10000, help="Number of examples from the training set to use in training.") parser.add_argument( "--test_dataset", type=str, required=True, help="File pattern of test set.") parser.add_argument( "--eval_num_batches", type=int, default=500, help="Number of batches to use in the evaluation.") parser.add_argument( "--output_file", type=str, help="Optional file to output result as a CSV row.") parser.add_argument( "--deduplicate_eval", default=False, action="store_true", help="If set, the evaluation will de-duplicate examples with " "identical contexts.") return parser.parse_args() class Method(enum.Enum): # Keyword based methods. TF_IDF = 1 BM25 = 2 # Vector similarity based methods. USE_SIM = 3 USE_LARGE_SIM = 4 ELMO_SIM = 5 BERT_SMALL_SIM = 6 BERT_LARGE_SIM = 7 USE_QA_SIM = 8 CONVERT_SIM = 9 # Vector mapping methods. USE_MAP = 10 USE_LARGE_MAP = 11 ELMO_MAP = 12 BERT_SMALL_MAP = 13 BERT_LARGE_MAP = 14 USE_QA_MAP = 15 CONVERT_MAP = 16 def to_method_object(self): """Convert the enum to an instance of `BaselineMethod`.""" if self == self.TF_IDF: return keyword_based.TfIdfMethod() elif self == self.BM25: return keyword_based.BM25Method() elif self == self.USE_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.TfHubEncoder( "https://tfhub.dev/google/" "universal-sentence-encoder/2")) elif self == self.USE_LARGE_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.TfHubEncoder( "https://tfhub.dev/google/" "universal-sentence-encoder-large/3")) elif self == self.ELMO_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.TfHubEncoder( "https://tfhub.dev/google/elmo/1")) elif self == self.USE_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.TfHubEncoder( "https://tfhub.dev/google/" "universal-sentence-encoder/2")) elif self == self.USE_LARGE_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.TfHubEncoder( "https://tfhub.dev/google/" "universal-sentence-encoder-large/3")) elif self == self.ELMO_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.TfHubEncoder( "https://tfhub.dev/google/elmo/1")) elif self == self.BERT_SMALL_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.BERTEncoder( "https://tfhub.dev/google/" "bert_uncased_L-12_H-768_A-12/1")) elif self == self.BERT_SMALL_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.BERTEncoder( "https://tfhub.dev/google/" "bert_uncased_L-12_H-768_A-12/1")) elif self == self.BERT_LARGE_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.BERTEncoder( "https://tfhub.dev/google/" "bert_uncased_L-24_H-1024_A-16/1")) elif self == self.BERT_LARGE_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.BERTEncoder( "https://tfhub.dev/google/" "bert_uncased_L-24_H-1024_A-16/1")) 
elif self == self.USE_QA_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.USEDualEncoder( "https://tfhub.dev/google/" "universal-sentence-encoder-multilingual-qa/1")) elif self == self.USE_QA_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.USEDualEncoder( "https://tfhub.dev/google/" "universal-sentence-encoder-multilingual-qa/1")) elif self == self.CONVERT_SIM: return vector_based.VectorSimilarityMethod( encoder=vector_based.ConveRTEncoder( "http://models.poly-ai.com/convert/v1/model.tar.gz")) elif self == self.CONVERT_MAP: return vector_based.VectorMappingMethod( encoder=vector_based.ConveRTEncoder( "http://models.poly-ai.com/convert/v1/model.tar.gz")) raise ValueError("Unknown method {}".format(self)) def __str__(self): """String representation to use in argparse help text.""" return self.name @staticmethod def from_string(s): """Convert a string parsed from argparse to an enum instance.""" try: return Method[s] except KeyError: raise ValueError() def _evaluate_method(method, recall_k, contexts, responses): accuracy_numerator = 0.0 accuracy_denominator = 0.0 for i in tqdm(range(0, len(contexts), recall_k)): context_batch = contexts[i:i + recall_k] responses_batch = responses[i:i + recall_k] if len(context_batch) != recall_k: break # Shuffle the responses. permutation = np.arange(recall_k) np.random.shuffle(permutation) context_batch_shuffled = [context_batch[j] for j in permutation] predictions = method.rank_responses( context_batch_shuffled, responses_batch) if predictions.shape != (recall_k, ): raise ValueError( "Predictions returned by method should have shape ({}, ), " "but saw {}".format(recall_k, predictions.shape)) accuracy_numerator += np.equal(predictions, permutation).mean() accuracy_denominator += 1.0 accuracy = 100 * accuracy_numerator / accuracy_denominator return accuracy def _load_data(file_pattern, num_examples, deduplicate=False): """Load contexts and responses from the given conversational dataset.""" contexts = [] responses = [] seen_contexts = set() complete = False with tqdm(total=num_examples) as progress_bar: file_names = tf.gfile.Glob(file_pattern) random.shuffle(file_names) if not file_names: raise ValueError( "No files matched pattern {}".format(file_pattern)) for file_name in file_names: glog.info("Reading %s", file_name) for record in tf.python_io.tf_record_iterator(file_name): example = tf.train.Example() example.ParseFromString(record) context = example.features.feature[ 'context'].bytes_list.value[0].decode("utf-8") if deduplicate and context in seen_contexts: continue if deduplicate: seen_contexts.add(context) contexts.append(context) response = example.features.feature[ 'response'].bytes_list.value[0].decode("utf-8") responses.append(response) progress_bar.update(1) if len(contexts) >= num_examples: complete = True break if complete: break glog.info("Read %i examples", len(contexts)) if not complete: glog.warning( "%i examples were requested, but dataset only contains %i.", num_examples, len(contexts)) return contexts, responses if __name__ == "__main__": args = _parse_args() method = args.method.to_method_object() glog.info("Loading training data") contexts_train, responses_train = _load_data( args.train_dataset, args.train_size) glog.info("Training %s method", args.method) method.train(contexts_train, responses_train) glog.info("Loading test data") contexts_test, responses_test = _load_data( args.test_dataset, args.eval_num_batches * args.recall_k, deduplicate=args.deduplicate_eval) glog.info("Running evaluation") 
accuracy = _evaluate_method( method, args.recall_k, contexts_test, responses_test) glog.info( "Final computed 1-of-%i accuracy is %.1f%%", args.recall_k, accuracy ) if args.output_file is not None: with open(args.output_file, "a") as f: csv_writer = csv.writer(f) csv_writer.writerow([ args.method, args.train_dataset, args.test_dataset, len(contexts_train), len(contexts_test), args.recall_k, accuracy ])
9,760
36.398467
76
py
conversational-datasets
conversational-datasets-master/baselines/method.py
"""Abstract class to define a baseline response selection method.""" import abc class BaselineMethod(object): """Abstract class to define a baseline response selection method.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def train(self, examples): """Perform any training steps using the (context, response) examples. Args: examples: a list of `(context, response)` string tuples, containing examples that can be used to set the parameters of this method. """ pass @abc.abstractmethod def rank_responses(self, contexts, responses): """Rank the responses for each context. Args: contexts: a list of strings giving the contexts to use. responses: a list of responses to rank, of the same length as `contexts`. These are shuffled, to help avoid cheating. Returns: predictions: a list of integers, giving the predictions indices between 0 and `len(contexts)` that the method predicts for assigning responses to the contexts. Explicitly, the method predicts that `reponse[predictions[i]]` is the correct response for `context[i]`. """ pass
1,289
33.864865
79
py
conversational-datasets
conversational-datasets-master/baselines/vector_based_test.py
"""Tests for vector_based.py.""" import os import tempfile import unittest import mock import numpy as np import tensorflow as tf from mock import patch from baselines import vector_based class TfHubEncoderTest(unittest.TestCase): @patch("tensorflow_hub.Module") def test_encode_context(self, mock_module_cls): mock_module_cls.return_value = lambda x: tf.ones( [tf.shape(x)[0], 3]) encoder = vector_based.TfHubEncoder("test_uri") mock_module_cls.assert_called_with("test_uri") encodings = encoder.encode_context(["hello"]) np.testing.assert_allclose([[1, 1, 1]], encodings) encodings = encoder.encode_context(["hello", "hi"]) np.testing.assert_allclose([[1, 1, 1], [1, 1, 1]], encodings) @patch("tensorflow_hub.Module") def test_encode_response(self, mock_module_cls): mock_module_cls.return_value = lambda x: tf.ones( [tf.shape(x)[0], 3]) encoder = vector_based.TfHubEncoder("test_uri") mock_module_cls.assert_called_with("test_uri") encodings = encoder.encode_response(["hello"]) np.testing.assert_allclose([[1, 1, 1]], encodings) encodings = encoder.encode_response(["hello", "hi"]) np.testing.assert_allclose([[1, 1, 1], [1, 1, 1]], encodings) class USEDualEncoderTest(unittest.TestCase): """Test USEDualEncoder.""" @patch("tensorflow_hub.Module") def test_encode_context(self, mock_module_cls): def mock_fn(inputs, signature, as_dict): self.assertTrue(as_dict) self.assertIn(signature, {"question_encoder", "response_encoder"}) if signature == "question_encoder": self.assertEqual(["input"], list(inputs.keys())) return {'outputs': tf.ones([tf.shape(inputs['input'])[0], 3])} else: self.assertEqual({"input", "context"}, set(inputs.keys())) return {'outputs': None} mock_module_cls.return_value = mock_fn encoder = vector_based.USEDualEncoder("test_uri") mock_module_cls.assert_called_with("test_uri") encodings = encoder.encode_context(["hello"]) np.testing.assert_allclose([[1, 1, 1]], encodings) @patch("tensorflow_hub.Module") def test_encode_response(self, mock_module_cls): def mock_fn(inputs, signature, as_dict): self.assertTrue(as_dict) self.assertIn(signature, {"question_encoder", "response_encoder"}) if signature == "response_encoder": self.assertEqual({"input", "context"}, set(inputs.keys())) return {'outputs': tf.ones([tf.shape(inputs['input'])[0], 3])} else: self.assertEqual(["input"], list(inputs.keys())) return {'outputs': None} mock_module_cls.return_value = mock_fn encoder = vector_based.USEDualEncoder("test_uri") mock_module_cls.assert_called_with("test_uri") encodings = encoder.encode_response(["hello"]) np.testing.assert_allclose([[1, 1, 1]], encodings) class ConveRTEncoderTest(unittest.TestCase): """Test ConveRTEncoder.""" @patch("tensorflow_hub.Module") def test_encode_context(self, mock_module_cls): def mock_fn(input, signature=None): self.assertIn(signature, {"encode_context", "encode_response"}) self.assertIsInstance(input, tf.Tensor) self.assertEqual(input.dtype, tf.string) if signature == "encode_context": return tf.ones([tf.shape(input)[0], 3]) mock_module_cls.return_value = mock_fn encoder = vector_based.ConveRTEncoder("test_uri") mock_module_cls.assert_called_with("test_uri") encodings = encoder.encode_context(["hello"]) np.testing.assert_allclose([[1, 1, 1]], encodings) @patch("tensorflow_hub.Module") def test_encode_response(self, mock_module_cls): def mock_fn(input, signature=None): self.assertIn(signature, {"encode_context", "encode_response"}) self.assertIsInstance(input, tf.Tensor) self.assertEqual(input.dtype, tf.string) if signature == "encode_response": return 
tf.ones([tf.shape(input)[0], 3]) mock_module_cls.return_value = mock_fn encoder = vector_based.ConveRTEncoder("test_uri") mock_module_cls.assert_called_with("test_uri") encodings = encoder.encode_response(["hello"]) np.testing.assert_allclose([[1, 1, 1]], encodings) class BERTEncoderTest(unittest.TestCase): @classmethod def setUpClass(cls): """Create a dummy vocabulary file.""" vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "hello", "hi", ] with tempfile.NamedTemporaryFile(delete=False) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) cls.vocab_file = vocab_writer.name @classmethod def tearDownClass(cls): """Delete the dummy vocabulary file.""" os.unlink(cls.vocab_file) @patch("tensorflow_hub.Module") def test_encode_context(self, mock_module_cls): def mock_module(inputs=None, signature=None, as_dict=None): self.assertTrue(as_dict) if signature == "tokens": self.assertEqual( {'input_mask', 'input_ids', 'segment_ids'}, inputs.viewkeys()) batch_size = tf.shape(inputs['input_ids'])[0] seq_len = tf.shape(inputs['input_ids'])[1] return { 'sequence_output': tf.ones([batch_size, seq_len, 3]) } self.assertEqual("tokenization_info", signature) return { 'do_lower_case': tf.constant(True), 'vocab_file': tf.constant(self.vocab_file), } mock_module_cls.return_value = mock_module encoder = vector_based.BERTEncoder("test_uri") self.assertEqual( [(("test_uri",), {'trainable': False})] * 2, mock_module_cls.call_args_list) # Final encodings will just be the count of the tokens in each # sentence, repeated 3 times. encodings = encoder.encode_context(["hello"]) np.testing.assert_allclose([[3, 3, 3]], encodings) encodings = encoder.encode_context(["hello", "hello hi"]) np.testing.assert_allclose([[3, 3, 3], [4, 4, 4]], encodings) class VectorSimilarityMethodTest(unittest.TestCase): def test_train(self): vector_based.VectorSimilarityMethod(None).train(["x", "y"], ["a", "b"]) def test_rank_responses(self): mock_encoder = mock.Mock() mock_encoder.encode_context.return_value = np.asarray([ [1, 0, 0], [0, 1, 0], [0, 1, 1], ], dtype=np.float32) mock_encoder.encode_response.return_value = np.asarray([ [1, 0, 0], [0, 1, 0], [0, 1, 1], ], dtype=np.float32) method = vector_based.VectorSimilarityMethod(mock_encoder) assignments = method.rank_responses( ["x", "y", "z"], ["a", "b", "c"] ) np.testing.assert_allclose([0, 1, 2], assignments) mock_encoder.encode_context.assert_has_calls([ mock.call(["x", "y", "z"]), ]) mock_encoder.encode_response.assert_has_calls([ mock.call(["a", "b", "c"]), ]) class VectorMappingMethodTest(unittest.TestCase): def test_train_then_rank(self): mock_encoder = mock.Mock() def _random_encode(texts): return np.random.normal(size=(len(texts), 3)) mock_encoder.encode_context.side_effect = _random_encode mock_encoder.encode_response.side_effect = _random_encode method = vector_based.VectorMappingMethod( mock_encoder, learning_rates=[1], regularizers=[0]) # Use 104 elements, so that the encoding must be batched. method.train(["context"] * 104, ["response"] * 104) mock_encoder.encode_context.assert_has_calls([ mock.call(["context"] * 100), mock.call(["context"] * 4), ]) mock_encoder.encode_response.assert_has_calls([ mock.call(["response"] * 100), mock.call(["response"] * 4), ]) assignments = method.rank_responses( ["x", "y", "z"], ["a", "b", "c"] ) self.assertEqual((3, ), assignments.shape) for id_ in assignments: self.assertGreaterEqual(id_, 0) self.assertLess(id_, 3) if __name__ == "__main__": unittest.main()
8,741
34.681633
79
py
conversational-datasets
conversational-datasets-master/baselines/keyword_based.py
"""Baseline response ranking methods using keyword matching.""" import numpy as np import scipy.sparse as sp from sklearn.feature_extraction.text import (HashingVectorizer, TfidfTransformer, _document_frequency) from sklearn.utils.testing import ignore_warnings from baselines import method class BM25Method(method.BaselineMethod): """BM25 baseline, using weighted keyword matching. Adapted from https://github.com/arosh/BM25Transformer/blob/master/bm25.py see Okapi BM25: a non-binary model - Introduction to Information Retrieval http://nlp.stanford.edu/IR-book/html/htmledition/okapi-bm25-a-non-binary-model-1.html Args: k1: float, optional (default=2.0) b: float, optional (default=0.75) """ def __init__(self, k1=2.0, b=0.75): """Create a new `BM25Method` object.""" self._k1 = k1 self._b = b def train(self, contexts, responses): """Fit the tf-idf transform and compute idf statistics.""" with ignore_warnings(): # Ignore deprecated `non_negative` warning. self._vectorizer = HashingVectorizer(non_negative=True) self._tfidf_transform = TfidfTransformer() count_matrix = self._tfidf_transform.fit_transform( self._vectorizer.transform(contexts + responses)) n_samples, n_features = count_matrix.shape df = _document_frequency(count_matrix) idf = np.log((n_samples - df + 0.5) / (df + 0.5)) self._idf_diag = sp.spdiags( idf, diags=0, m=n_features, n=n_features ) document_lengths = count_matrix.sum(axis=1) self._average_document_length = np.mean(document_lengths) print(self._average_document_length) def _vectorize(self, strings): """Vectorize the given strings.""" with ignore_warnings(): # Ignore deprecated `non_negative` warning. tf_idf_vectors = self._tfidf_transform.transform( self._vectorizer.transform(strings)) tf_idf_vectors = sp.csr_matrix( tf_idf_vectors, dtype=np.float64, copy=True) # Document length (number of terms) in each row # Shape is (n_samples, 1) document_lengths = tf_idf_vectors.sum(axis=1) # Number of non-zero elements in each row # Shape is (n_samples, ) num_terms = tf_idf_vectors.indptr[1:] - tf_idf_vectors.indptr[0:-1] # In each row, repeat `document_lengths` for `num_terms` times # Shape is (sum(num_terms), ) rep = np.repeat(np.asarray(document_lengths), num_terms) # Compute BM25 score only for non-zero elements data = tf_idf_vectors.data * (self._k1 + 1) / ( tf_idf_vectors.data + self._k1 * ( 1 - self._b + self._b * rep / self._average_document_length)) vectors = sp.csr_matrix( (data, tf_idf_vectors.indices, tf_idf_vectors.indptr), shape=tf_idf_vectors.shape) vectors = vectors * self._idf_diag return vectors def rank_responses(self, contexts, responses): """Rank the responses for each context.""" contexts_matrix = self._vectorize(contexts) responses_matrix = self._vectorize(responses) similarities = contexts_matrix.dot(responses_matrix.T).toarray() return np.argmax(similarities, axis=1) class TfIdfMethod(method.BaselineMethod): """TF-IDF baseline. This hashes words to sparse IDs, and then computes tf-idf statistics for these hashed IDs. As a result, no words are considered out-of-vocabulary. 
""" def train(self, contexts, responses): """Fit the tf-idf transform and compute idf statistics.""" self._vectorizer = HashingVectorizer() self._tfidf_transform = TfidfTransformer() self._tfidf_transform.fit( self._vectorizer.transform(contexts + responses)) def _vectorize(self, strings): """Vectorize the given strings.""" tf_idf_vectors = self._tfidf_transform.transform( self._vectorizer.transform(strings)) return sp.csr_matrix( tf_idf_vectors, dtype=np.float64, copy=True) def rank_responses(self, contexts, responses): """Rank the responses for each context.""" contexts_matrix = self._vectorize(contexts) responses_matrix = self._vectorize(responses) similarities = contexts_matrix.dot(responses_matrix.T).toarray() return np.argmax(similarities, axis=1)
4,593
38.947826
89
py
conversational-datasets
conversational-datasets-master/baselines/vector_based.py
"""Methods for conversational response ranking based on vector comparisons.""" import abc import itertools import shutil import tempfile import glog import numpy as np import tensorflow as tf import tensorflow_hub import tensorflow_text # NOQA: required for PolyAI encoders. import tf_sentencepiece # NOQA: it is used when importing USE_QA. from sklearn.model_selection import train_test_split from tqdm import tqdm import bert.run_classifier import bert.tokenization from baselines import method class Encoder(object): """A model that maps from text to dense vectors.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def encode_context(self, contexts): """Encode the given texts as vectors. Args: contexts: a list of N strings, to be encoded. Returns: an (N, d) numpy matrix of encodings. """ pass def encode_response(self, responses): """Encode the given response texts as vectors. Args: responses: a list of N strings, to be encoded. Returns: an (N, d) numpy matrix of encodings. """ # Default to using the context encoding. return self.encode_context(responses) class TfHubEncoder(Encoder): """An encoder that is loaded as a module from tensorflow hub. The tensorflow hub module must take a vector of strings, and return a matrix of encodings. Args: uri: (string) the tensorflow hub URI for the model. """ def __init__(self, uri): """Create a new `TfHubEncoder` object.""" self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri) self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string) self._context_embeddings = embed_fn(self._fed_texts) init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): """Encode the given texts.""" return self._session.run( self._context_embeddings, {self._fed_texts: contexts}) class USEDualEncoder(Encoder): """A dual encoder following the USE_QA signatures. Args: uri: (string) the tensorflow hub URI for the model. """ def __init__(self, uri): """Create a new `USEDualEncoder` object.""" self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri) self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string) self._context_embeddings = embed_fn( dict(input=self._fed_texts), signature="question_encoder", as_dict=True, )['outputs'] empty_strings = tf.fill( tf.shape(self._fed_texts), "" ) self._response_embeddings = embed_fn( dict(input=self._fed_texts, context=empty_strings), signature="response_encoder", as_dict=True, )['outputs'] init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): """Encode the given texts as contexts.""" return self._session.run( self._context_embeddings, {self._fed_texts: contexts}) def encode_response(self, responses): """Encode the given texts as responses.""" return self._session.run( self._response_embeddings, {self._fed_texts: responses}) class ConveRTEncoder(Encoder): """The ConveRT encoder. See https://github.com/PolyAI-LDN/polyai-models. Args: uri: (string) the tensorflow hub URI for the model. 
""" def __init__(self, uri): """Create a new `ConveRTEncoder` object.""" self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri) self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string) self._context_embeddings = embed_fn( self._fed_texts, signature="encode_context") self._response_embeddings = embed_fn( self._fed_texts, signature="encode_response") init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): """Encode the given texts as contexts.""" return self._session.run( self._context_embeddings, {self._fed_texts: contexts}) def encode_response(self, responses): """Encode the given texts as responses.""" return self._session.run( self._response_embeddings, {self._fed_texts: responses}) class BERTEncoder(Encoder): """The BERT encoder that is loaded as a module from tensorflow hub. This class tokenizes the input text using the bert tokenization library. The final encoding is computed as the sum of the token embeddings. Args: uri: (string) the tensorflow hub URI for the model. """ def __init__(self, uri): """Create a new `BERTEncoder` object.""" if not tf.test.is_gpu_available(): glog.warning( "No GPU detected, BERT will run a lot slower than with a GPU.") self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri, trainable=False) self._tokenizer = self._create_tokenizer_from_hub_module(uri) self._input_ids = tf.placeholder( name="input_ids", shape=[None, None], dtype=tf.int32) self._input_mask = tf.placeholder( name="input_mask", shape=[None, None], dtype=tf.int32) self._segment_ids = tf.zeros_like(self._input_ids) bert_inputs = dict( input_ids=self._input_ids, input_mask=self._input_mask, segment_ids=self._segment_ids ) embeddings = embed_fn( inputs=bert_inputs, signature="tokens", as_dict=True)[ "sequence_output" ] mask = tf.expand_dims( tf.cast(self._input_mask, dtype=tf.float32), -1) self._embeddings = tf.reduce_sum(mask * embeddings, axis=1) init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): """Encode the given texts.""" return self._session.run(self._embeddings, self._feed_dict(contexts)) @staticmethod def _create_tokenizer_from_hub_module(uri): """Get the vocab file and casing info from the Hub module.""" with tf.Graph().as_default(): bert_module = tensorflow_hub.Module(uri, trainable=False) tokenization_info = bert_module( signature="tokenization_info", as_dict=True) with tf.Session() as sess: vocab_file, do_lower_case = sess.run( [ tokenization_info["vocab_file"], tokenization_info["do_lower_case"] ]) return bert.tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) def _feed_dict(self, texts, max_seq_len=128): """Create a feed dict for feeding the texts as input. This uses dynamic padding so that the maximum sequence length is the smaller of `max_seq_len` and the longest sequence actually found in the batch. (The code in `bert.run_classifier` always pads up to the maximum even if the examples in the batch are all shorter.) """ all_ids = [] for text in texts: tokens = ["[CLS]"] + self._tokenizer.tokenize(text) # Possibly truncate the tokens. 
tokens = tokens[:(max_seq_len - 1)] tokens.append("[SEP]") ids = self._tokenizer.convert_tokens_to_ids(tokens) all_ids.append(ids) max_seq_len = max(map(len, all_ids)) input_ids = [] input_mask = [] for ids in all_ids: mask = [1] * len(ids) # Zero-pad up to the sequence length. while len(ids) < max_seq_len: ids.append(0) mask.append(0) input_ids.append(ids) input_mask.append(mask) return {self._input_ids: input_ids, self._input_mask: input_mask} class VectorSimilarityMethod(method.BaselineMethod): """Ranks responses using cosine similarity of context & response vectors. Args: encoder: the `Encoder` object to use. """ def __init__(self, encoder): """Create a new `VectorSimilarityMethod` object.""" self._encoder = encoder def train(self, contexts, responses): """Train on the contexts and responses. Does nothing.""" pass def rank_responses(self, contexts, responses): """Rank the responses for each context, using cosine similarity.""" contexts_matrix = self._encoder.encode_context(contexts) responses_matrix = self._encoder.encode_response(responses) responses_matrix /= np.linalg.norm( responses_matrix, axis=1, keepdims=True) similarities = np.matmul(contexts_matrix, responses_matrix.T) return np.argmax(similarities, axis=1) class VectorMappingMethod(method.BaselineMethod): """Applies a linear mapping to the response side and ranks with similarity. This learns a [dim, dim] weights matrix, and maps the response vector `x` to `x + weights.x`. The weights matrix is learned using gradient descent on the train set, and the dot product loss from https://arxiv.org/abs/1705.00652 . A grid search over hyper-parameters is performed, and the weights that get the best accuracy on the dev set are used. Args: encoder: the `Encoder` object to use. learning_rates: the learning rates to try in grid search. regularizers: the regularizers to try in grid search. """ def __init__( self, encoder, learning_rates=(10.0, 3.0, 1.0, 0.3, 0.01), regularizers=(0, 0.1, 0.01, 0.001), ): """Create a new `VectorMappingMethod` object.""" self._encoder = encoder self._learning_rates = learning_rates self._regularizers = regularizers def train(self, contexts, responses): """Train on the contexts and responses.""" glog.info( "Training on %i contexts and responses.", len(contexts)) (contexts_train, contexts_dev, responses_train, responses_dev ) = self._create_train_and_dev(contexts, responses) glog.info( "Created a training set of size %i, and a dev set of size %i.", contexts_train.shape[0], contexts_dev.shape[0]) self._build_mapping_graph( contexts_train, contexts_dev, responses_train, responses_dev ) self._grid_search() # Batch size to use when encoding texts. 
_ENCODING_BATCH_SIZE = 100 _TRAIN_BATCH_SIZE = 256 _MAX_EPOCHS = 100 def _create_train_and_dev(self, contexts, responses): """Create a train and dev set of context and response vectors.""" glog.info("Encoding the train set.") context_encodings = [] response_encodings = [] for i in tqdm(range(0, len(contexts), self._ENCODING_BATCH_SIZE)): contexts_batch = contexts[i:i + self._ENCODING_BATCH_SIZE] responses_batch = responses[i:i + self._ENCODING_BATCH_SIZE] context_encodings.append( self._encoder.encode_context(contexts_batch)) response_encodings.append( self._encoder.encode_response(responses_batch)) context_encodings = np.concatenate( context_encodings).astype(np.float32) response_encodings = np.concatenate( response_encodings).astype(np.float32) return train_test_split( context_encodings, response_encodings, test_size=0.2) def _build_mapping_graph(self, contexts_train, contexts_dev, responses_train, responses_dev): """Build the graph that applies a learned mapping to the vectors.""" self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): def read_batch(contexts, responses, batch_size): dataset = tf.data.Dataset.from_tensor_slices( (contexts, responses)) dataset = dataset.shuffle(batch_size * 8) dataset = dataset.batch(batch_size) return dataset.make_initializable_iterator() self._train_iterator = read_batch( contexts_train, responses_train, batch_size=self._TRAIN_BATCH_SIZE) self._dev_iterator = read_batch( contexts_dev, responses_dev, batch_size=100) (contexts_batch_train, responses_batch_train) = self._train_iterator.get_next() (contexts_batch_dev, responses_batch_dev) = self._dev_iterator.get_next() # Create the train op. self._regularizer = tf.placeholder(dtype=tf.float32, shape=None) self._create_train_op( self._compute_similarities( contexts_batch_train, responses_batch_train, is_train=True) ) # Create the accuracy eval metric. dev_batch_size = tf.shape(contexts_batch_dev)[0] similarities = self._compute_similarities( contexts_batch_dev, responses_batch_dev, is_train=False) self._accuracy = tf.metrics.accuracy( labels=tf.range(dev_batch_size), predictions=tf.argmax(similarities, 1) ) # Create the inference graph. encoding_dim = int(contexts_batch_train.shape[1]) self._fed_context_encodings = tf.placeholder( dtype=tf.float32, shape=[None, encoding_dim] ) self._fed_response_encodings = tf.placeholder( dtype=tf.float32, shape=[None, encoding_dim] ) self._similarities = self._compute_similarities( self._fed_context_encodings, self._fed_response_encodings ) self._local_init_op = tf.local_variables_initializer() self._reset_op = tf.global_variables_initializer() self._saver = tf.train.Saver(max_to_keep=1) def _compute_similarities(self, context_encodings, response_encodings, is_train=False): """Compute the similarities between context and responses. Uses a learned mapping on the response side. """ with tf.variable_scope("compute_similarities", reuse=(not is_train)): # Normalise the vectors so that the model is not dependent on # vector scaling. 
context_encodings = tf.nn.l2_normalize(context_encodings, 1) response_encodings = tf.nn.l2_normalize(response_encodings, 1) encoding_dim = int(context_encodings.shape[1]) mapping_weights = tf.get_variable( "mapping_weights", dtype=tf.float32, shape=[encoding_dim, encoding_dim], initializer=tf.orthogonal_initializer(), regularizer=tf.contrib.layers.l2_regularizer( self._regularizer), ) residual_weight = tf.get_variable( "residual_weight", dtype=tf.float32, shape=[], initializer=tf.constant_initializer(1.0), ) responses_mapped = tf.matmul(response_encodings, mapping_weights) responses_mapped += residual_weight * response_encodings return tf.matmul( context_encodings, responses_mapped, transpose_b=True) def _create_train_op(self, similarities): """Create the train op.""" train_batch_size = tf.shape(similarities)[0] tf.losses.softmax_cross_entropy( onehot_labels=tf.one_hot( tf.range(train_batch_size), train_batch_size), label_smoothing=0.2, logits=similarities, reduction=tf.losses.Reduction.MEAN ) self._learning_rate = tf.placeholder(dtype=tf.float32, shape=None) self._train_op = tf.contrib.training.create_train_op( total_loss=tf.losses.get_total_loss(), optimizer=tf.train.AdagradOptimizer( learning_rate=self._learning_rate)) def _grid_search(self): """Perform a grid search of training hyper-parameters. The model that does the best on the dev set will be stored. """ save_path = tempfile.mkdtemp(prefix="VectorMappingMethod") def _compute_accuracy(): self._session.run(self._local_init_op) self._session.run(self._dev_iterator.initializer) while True: try: accuracy, _ = self._session.run(self._accuracy) except tf.errors.OutOfRangeError: return accuracy best_accuracy, best_learning_rate, best_regularizer = None, None, None for learning_rate, regularizer in itertools.product( self._learning_rates, self._regularizers): # Train using this learning rate and regularizer. 
self._session.run(self._reset_op) best_accuracy_for_run = None epochs_since_improvement = 0 epoch = 0 step = 0 glog.info( "\n\nTraining with learning_rate = %.5f, " "and regularizer = %.5f", learning_rate, regularizer) self._session.run(self._train_iterator.initializer) while epoch < self._MAX_EPOCHS: try: loss = self._session.run( self._train_op, {self._learning_rate: learning_rate, self._regularizer: regularizer}) step += 1 except tf.errors.OutOfRangeError: epoch += 1 accuracy = _compute_accuracy() log_suffix = "" self._session.run(self._train_iterator.initializer) if best_accuracy is None or accuracy > best_accuracy: best_accuracy = accuracy best_learning_rate = learning_rate best_regularizer = regularizer self._saver.save(self._session, save_path) log_suffix += "*" if (best_accuracy_for_run is None or accuracy > best_accuracy_for_run): epochs_since_improvement = 0 best_accuracy_for_run = accuracy log_suffix += "*" glog.info( "epoch %i: step: %i, loss: %.3f, " "dev accuracy: %.2f%% %s", epoch, step, loss, accuracy * 100, log_suffix) epochs_since_improvement += 1 if epochs_since_improvement >= 10: glog.info( "No improvement for %i epochs, terminating run.", epochs_since_improvement) break glog.info( "Best accuracy found was %.2f%%, with learning_rate = %.5f and " "regularizer = %.5f.", best_accuracy * 100, best_learning_rate, best_regularizer) self._saver.restore(self._session, save_path) shutil.rmtree(save_path) def rank_responses(self, contexts, responses): """Rank the responses for each context.""" similarities = self._session.run( self._similarities, { self._fed_context_encodings: self._encoder.encode_context( contexts), self._fed_response_encodings: self._encoder.encode_response( responses), } ) return np.argmax(similarities, axis=1)
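The mapping learned above can be summarized in a few lines of NumPy. The following is a minimal sketch, not the training code: the random arrays stand in for the Encoder outputs, and weights stands in for the learned mapping_weights, to show how the residual map x -> x.weights + residual_weight * x feeds the dot-product ranking.

import numpy as np

rng = np.random.default_rng(0)
dim = 8
contexts = rng.normal(size=(5, dim))       # stand-ins for encode_context output
responses = rng.normal(size=(5, dim))      # stand-ins for encode_response output
weights = rng.normal(scale=0.1, size=(dim, dim))  # stand-in for the learned mapping
residual_weight = 1.0                      # initialised to 1.0 in the TF graph

# Normalise so that scoring is independent of vector scaling.
contexts /= np.linalg.norm(contexts, axis=1, keepdims=True)
responses /= np.linalg.norm(responses, axis=1, keepdims=True)

# Residual mapping on the response side: x -> x.weights + residual_weight * x
responses_mapped = responses @ weights + residual_weight * responses

# Rank: highest dot product per context row.
similarities = contexts @ responses_mapped.T
print(np.argmax(similarities, axis=1))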
21,348
37.605787
79
py
conversational-datasets
conversational-datasets-master/baselines/__init__.py
0
0
0
py
conversational-datasets
conversational-datasets-master/baselines/keyword_based_test.py
"""Tests for keyword_based.py.""" import unittest from baselines import keyword_based class TfIdfMethodTest(unittest.TestCase): def test_train_test(self): """Check that it can correctly rank a simple example.""" method = keyword_based.TfIdfMethod() method.train( ["hello how are you", "hello how are"], ["hello how", "hello"] ) predictions = method.rank_responses( ["hello", "how", "are", "you"], ["you", "are", "how", "hello"] ) self.assertEqual( list(predictions), [3, 2, 1, 0] ) def test_train_test_idf(self): """Check that the keyword with higher idf counts for more.""" method = keyword_based.TfIdfMethod() method.train( ["hello how are you", "hello how are"], ["hello how", "hello"] ) predictions = method.rank_responses( ["hello you", "hello you"], ["hello", "you"] ) self.assertEqual( list(predictions), [1, 1] # you is more informative than 'hello'. ) class BM25MethodTest(unittest.TestCase): def test_train_test_bm25(self): """Check that bm25 can correctly rank a simple example.""" method = keyword_based.BM25Method() method.train( ["hello how are you", "hello how are"], ["hello how", "hello"] ) predictions = method.rank_responses( ["hello", "how", "are"], ["are", "how", "hello"] ) self.assertEqual( list(predictions), [2, 1, 0] ) if __name__ == "__main__": unittest.main()
1,722
26.790323
69
py
conversational-datasets
conversational-datasets-master/opensubtitles/create_data.py
"""A Dataflow script for creating sentence pair data from text files. For usage see README.md. """ import argparse import hashlib import json import logging import os import re import uuid from functools import partial from os import path import apache_beam as beam import tensorflow as tf from apache_beam import pvalue from apache_beam.io.filesystems import FileSystems from apache_beam.io.textio import WriteToText from apache_beam.io.tfrecordio import WriteToTFRecord from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions _TF_FORMAT = "TF" _JSON_FORMAT = "JSON" def _parse_args(argv=None): """Parse command-line args.""" def _positive_int(value): """Define a positive integer ArgumentParser type.""" value = int(value) if value <= 0: raise argparse.ArgumentTypeError( "Value must be positive, {} was passed.".format(value)) return value parser = argparse.ArgumentParser() parser.add_argument( "--sentence_files", required=True, help="The Google cloud storage file pattern of text files containing " "one sentence per line.") parser.add_argument( "--num_extra_contexts", default=10, help="The maximum number of extra contexts in an example.") parser.add_argument( "--min_length", default=9, type=_positive_int, help="The minimum length of a context / response to include.") parser.add_argument( "--max_length", default=127, type=_positive_int, help="The maximum length of a context / response to include.") parser.add_argument( "--output_dir", required=True, help="Output directory to write the dataset.") parser.add_argument( "--dataset_format", choices={_TF_FORMAT, _JSON_FORMAT}, default="TF", help="The dataset format to write. 'TF' for serialized tensorflow " "examples in TFRecords. 'JSON' for text files with one JSON " "object per line." ) parser.add_argument( "--train_split", default=0.9, type=float, help="The proportion of data to put in the training set.") parser.add_argument( "--num_shards_test", default=100, type=_positive_int, help="The number of shards for the test set.") parser.add_argument( "--num_shards_train", default=1000, type=_positive_int, help="The number of shards for the train set.") return parser.parse_known_args(argv) def _should_skip(line, min_length, max_length): """Whether a line should be skipped depending on the length.""" return len(line) < min_length or len(line) > max_length def create_example(previous_lines, line, file_id): """Creates examples with multi-line context The examples will include: file_id: the name of the file where these lines were obtained. response: the current line text context: the previous line text context/0: 2 lines before context/1: 3 lines before, etc. """ example = { 'file_id': file_id, 'context': previous_lines[-1], 'response': line, } example['file_id'] = file_id example['context'] = previous_lines[-1] extra_contexts = previous_lines[:-1] example.update({ 'context/{}'.format(i): context for i, context in enumerate(extra_contexts[::-1]) }) return example def _preprocess_line(line): line = line.decode("utf-8") # Remove the first word if it is followed by colon (speaker names) # NOTE: this wont work if the speaker's name has more than one word line = re.sub('(?:^|(?:[.!?]\\s))(\\w+):', "", line) # Remove anything between brackets (corresponds to acoustic events). 
line = re.sub("[\\[(](.*?)[\\])]", "", line) # Strip blanks hyphens and line breaks line = line.strip(" -\n") return line def _create_examples_from_file(file_name, min_length, max_length, num_extra_contexts): _, file_id = path.split(file_name) previous_lines = [] for line in FileSystems.open(file_name, "application/octet-stream"): line = _preprocess_line(line) if not line: continue should_skip = _should_skip( line, min_length=min_length, max_length=max_length) if previous_lines: should_skip |= _should_skip( previous_lines[-1], min_length=min_length, max_length=max_length) if not should_skip: yield create_example(previous_lines, line, file_id) previous_lines.append(line) if len(previous_lines) > num_extra_contexts + 1: del previous_lines[0] def _features_to_serialized_tf_example(features): """Convert a string dict to a serialized TF example. The dictionary maps feature names (strings) to feature values (strings). """ example = tf.train.Example() for feature_name, feature_value in features.items(): example.features.feature[feature_name].bytes_list.value.append( feature_value.encode("utf-8")) return example.SerializeToString() def _shuffle_examples(examples): examples |= ("add random key" >> beam.Map( lambda example: (uuid.uuid4(), example))) examples |= ("group by key" >> beam.GroupByKey()) examples |= ("get shuffled values" >> beam.FlatMap(lambda t: t[1])) return examples class _TrainTestSplitFn(beam.DoFn): """Splits an input PCollection of examples into train and test. This uses the file id (name) to compute the split, so that examples from the same file are in the same set. The split is deterministic based on the file id, so that multiple runs produce the same result. """ TRAIN_TAG = "train" TEST_TAG = "test" def __init__(self, train_split=0.9, num_buckets=4096): super(_TrainTestSplitFn, self).__init__() self._train_split = train_split self._num_buckets = num_buckets def process(self, example): split_value = self._split_value(example['file_id']) split = ( self.TRAIN_TAG if split_value < self._train_split else self.TEST_TAG) yield pvalue.TaggedOutput(split, example) def _split_value(self, file_id): """Compute a value from 0 to 1 used to compute the split.""" md5 = hashlib.md5() md5.update(file_id) md5_digest = int(md5.hexdigest(), 16) return ( (1 + md5_digest % self._num_buckets) / float(self._num_buckets) ) def run(argv=None): """Run the beam pipeline.""" args, pipeline_args = _parse_args(argv) pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = True p = beam.Pipeline(options=pipeline_options) sentence_files_match = FileSystems.match([args.sentence_files])[0] sentence_files = [ file_metadata.path for file_metadata in sentence_files_match.metadata_list] logging.info("Reading %i files from %s.", len(sentence_files), args.sentence_files) assert len(sentence_files) > 0 sentence_files = p | beam.Create(sentence_files) examples = sentence_files | "create examples" >> beam.FlatMap( partial(_create_examples_from_file, min_length=args.min_length, max_length=args.max_length, num_extra_contexts=args.num_extra_contexts) ) examples = _shuffle_examples(examples) examples |= "split train and test" >> beam.ParDo( _TrainTestSplitFn(args.train_split)).with_outputs( _TrainTestSplitFn.TEST_TAG, _TrainTestSplitFn.TRAIN_TAG) if args.dataset_format == _JSON_FORMAT: write_sink = WriteToText file_name_suffix = ".json" serialize_fn = json.dumps else: assert args.dataset_format == _TF_FORMAT write_sink = WriteToTFRecord file_name_suffix = ".tfrecord" 
        serialize_fn = _features_to_serialized_tf_example

    for name, tag in [("train", _TrainTestSplitFn.TRAIN_TAG),
                      ("test", _TrainTestSplitFn.TEST_TAG)]:
        # Use the shard count that matches the split being written.
        num_shards = (
            args.num_shards_train if name == "train"
            else args.num_shards_test)
        serialized_examples = examples[tag] | (
            "serialize {} examples".format(name) >> beam.Map(serialize_fn))
        (
            serialized_examples | ("write " + name)
            >> write_sink(
                os.path.join(args.output_dir, name),
                file_name_suffix=file_name_suffix,
                num_shards=num_shards,
            )
        )

    result = p.run()
    result.wait_until_finish()


if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    run()
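Since create_example's layout is easy to get wrong (context/0 is the second-most-recent line, not the oldest), here is a tiny self-contained trace of the dictionary it builds, using the same construction as the function above; the literal strings are invented for illustration.

# Trace of the example layout: the most recent line becomes 'context', and
# older lines become 'context/0', 'context/1', ... from nearest to furthest.
previous_lines = ["line 1", "line 2", "line 3"]

example = {
    'file_id': "file.txt",
    'context': previous_lines[-1],
    'response': "line 4",
}
extra_contexts = previous_lines[:-1]
example.update({
    'context/{}'.format(i): context
    for i, context in enumerate(extra_contexts[::-1])
})
print(example)
# {'file_id': 'file.txt', 'context': 'line 3', 'response': 'line 4',
#  'context/0': 'line 2', 'context/1': 'line 1'}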
8,838
31.141818
78
py
conversational-datasets
conversational-datasets-master/opensubtitles/__init__.py
0
0
0
py
conversational-datasets
conversational-datasets-master/opensubtitles/create_data_test.py
"""Tests for create_data.py.""" import json import shutil import tempfile import unittest from glob import glob from os import path import tensorflow as tf from opensubtitles import create_data _TRAIN_FILE = "\n".join([ "matt: AAAA", # words followed by colons are stripped. "[skip]", # text in brackets is removed. "BBBB", "", "", "" # empty lines are ignored. "CCCC", "(all laughing)", "c3po:", "- DDDD (boom!)", "123", # line length will be below the test --min_length. "12345", # line length will be above the test --min_length. ]) _TEST_FILE = """ aaaa bbbb cccc dddd """ class CreateDataPipelineTest(unittest.TestCase): def setUp(self): self._temp_dir = tempfile.mkdtemp() self.maxDiff = None def tearDown(self): shutil.rmtree(self._temp_dir) def test_run(self): # These filenames are chosen so that their hashes will cause them to # be put in the train and test set respectively. with open(path.join(self._temp_dir, "input_train.txt"), "w") as f: f.write(_TRAIN_FILE.encode("utf-8")) with open(path.join(self._temp_dir, "input_test.txt"), "w") as f: f.write(_TEST_FILE.encode("utf-8")) create_data.run(argv=[ "--runner=DirectRunner", "--sentence_files={}/*.txt".format(self._temp_dir), "--output_dir=" + self._temp_dir, "--dataset_format=TF", "--num_shards_test=2", "--num_shards_train=2", "--min_length=4", "--max_length=5", "--train_split=0.5", ]) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["train-00000-of-00002.tfrecord", "train-00001-of-00002.tfrecord"]], glob(path.join(self._temp_dir, "train-*")) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["test-00000-of-00002.tfrecord", "test-00001-of-00002.tfrecord"]], glob(path.join(self._temp_dir, "test-*")) ) train_examples = self._read_examples("train-*") expected_train_examples = [ self.create_example( ["AAAA"], "BBBB", "input_train.txt"), self.create_example( ["AAAA", "BBBB"], "CCCC", "input_train.txt"), self.create_example( ["AAAA", "BBBB", "CCCC"], "DDDD", "input_train.txt"), ] self.assertItemsEqual( expected_train_examples, train_examples ) test_examples = self._read_examples("test-*") expected_test_examples = [ self.create_example( ["aaaa"], "bbbb", "input_test.txt"), self.create_example( ["aaaa", "bbbb"], "cccc", "input_test.txt"), self.create_example( ["aaaa", "bbbb", "cccc"], "dddd", "input_test.txt"), ] self.assertItemsEqual( expected_test_examples, test_examples ) def create_example(self, previous_lines, line, file_id): features = create_data.create_example(previous_lines, line, file_id) example = tf.train.Example() for feature_name, feature_value in features.items(): example.features.feature[feature_name].bytes_list.value.append( feature_value.encode("utf-8")) return example def _read_examples(self, pattern): examples = [] for file_name in glob(path.join(self._temp_dir, pattern)): for record in tf.io.tf_record_iterator(file_name): example = tf.train.Example() example.ParseFromString(record) examples.append(example) return examples def test_run_json(self): # These filenames are chosen so that their hashes will cause them to # be put in the train and test set respectively. 
with open(path.join(self._temp_dir, "input_train.txt"), "w") as f: f.write(_TRAIN_FILE.encode("utf-8")) with open(path.join(self._temp_dir, "input_test.txt"), "w") as f: f.write(_TEST_FILE.encode("utf-8")) create_data.run(argv=[ "--runner=DirectRunner", "--sentence_files={}/*.txt".format(self._temp_dir), "--output_dir=" + self._temp_dir, "--dataset_format=JSON", "--num_shards_test=2", "--num_shards_train=2", "--min_length=4", "--max_length=5", "--train_split=0.5", ]) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["train-00000-of-00002.json", "train-00001-of-00002.json"]], glob(path.join(self._temp_dir, "train-*")) ) self.assertItemsEqual( [path.join(self._temp_dir, expected_file) for expected_file in ["test-00000-of-00002.json", "test-00001-of-00002.json"]], glob(path.join(self._temp_dir, "test-*")) ) train_examples = self._read_json_examples("train-*") expected_train_examples = [ create_data.create_example( ["AAAA"], "BBBB", "input_train.txt"), create_data.create_example( ["AAAA", "BBBB"], "CCCC", "input_train.txt"), create_data.create_example( ["AAAA", "BBBB", "CCCC"], "DDDD", "input_train.txt"), ] self.assertItemsEqual( expected_train_examples, train_examples ) test_examples = self._read_json_examples("test-*") expected_test_examples = [ create_data.create_example( ["aaaa"], "bbbb", "input_test.txt"), create_data.create_example( ["aaaa", "bbbb"], "cccc", "input_test.txt"), create_data.create_example( ["aaaa", "bbbb", "cccc"], "dddd", "input_test.txt"), ] self.assertItemsEqual( expected_test_examples, test_examples ) def _read_json_examples(self, pattern): examples = [] for file_name in glob(path.join(self._temp_dir, pattern)): for line in open(file_name): examples.append(json.loads(line)) return examples if __name__ == "__main__": unittest.main()
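The fixture names above work because _TrainTestSplitFn hashes the file id deterministically. The Python 3 sketch below recomputes that split value (the original script is Python 2, so the .encode call is added here) and makes it easy to check which split a candidate fixture name would land in under the test's --train_split=0.5.

import hashlib

def split_value(file_id, num_buckets=4096):
    # Mirrors _TrainTestSplitFn._split_value: a deterministic value in (0, 1].
    md5 = hashlib.md5()
    md5.update(file_id.encode("utf-8"))
    md5_digest = int(md5.hexdigest(), 16)
    return (1 + md5_digest % num_buckets) / float(num_buckets)

for name in ["input_train.txt", "input_test.txt"]:
    value = split_value(name)
    split = "train" if value < 0.5 else "test"  # --train_split=0.5 as in the test
    print(name, round(value, 3), split)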
6,519
32.435897
76
py
mcbeth
mcbeth-master/mcbeth/python/setup.py
import os from setuptools import find_packages, setup name = 'mcl' version = '0.1.0.dev' description = 'A programming language for the measurement calculus' long_description = open('README.md', encoding='utf-8').read() reqs = open('requirements.txt').readlines() requirements = [r.strip() for r in reqs] packs = (['mcl']) setup( name=name, version=version, url='http://github.com/seunomonije/mclang', install_requires=requirements, description=description, long_description=long_description, packages=packs, package_data={'mcl': ['mcl/*']}, )
563
22.5
67
py
mcbeth
mcbeth-master/mcbeth/python/CirqBuilder.py
import cirq
import sys
import json
import numpy as np


class XBasisMeasurementSignal(cirq.SingleQubitGate):
    """Custom measurement signal to measure in the X-basis with specified angle.

    These will signal to the program where measurements need to go at
    the end of the program. This method DOES NOT handle any measurement.
    """
    def __init__(self, theta):
        super(XBasisMeasurementSignal, self).__init__()
        self.theta = theta

    def _decompose_(self, qubits):
        # Note the qubits here is a tuple with one qubit.
        yield cirq.H(*qubits)
        yield cirq.Rx(rads=self.theta)(*qubits)

    def _circuit_diagram_info_(self, args):
        return f"XRot({self.theta})"


class OpResolver:
    def _resolve_prep(
        qubit: cirq.LineQubit,
        **kwargs,
    ):
        return cirq.H(qubit)

    def _resolve_x_correction(
        qubits: 'list[cirq.LineQubit]',
        **kwargs,
    ):
        signal_qubits = kwargs.get('signal_qubits')
        # We know there's only one qubit in the cirq_qubits list here,
        # so we can use the * operator to deconstruct safely.
        if signal_qubits:
            ops = []
            for signal in signal_qubits:
                ops.append(cirq.CX(signal, *qubits))
            return ops
        else:
            return cirq.X(*qubits)

    def _resolve_z_correction(
        qubits: 'list[cirq.LineQubit]',
        **kwargs,
    ):
        signal_qubits = kwargs.get('signal_qubits')
        # We know there's only one qubit in the cirq_qubits list here,
        # so we can use the * operator to deconstruct safely.
        if signal_qubits:
            ops = []
            for signal in signal_qubits:
                ops.append(cirq.CZ(signal, *qubits))
            return ops
        else:
            return cirq.Z(*qubits)

    def _resolve_entanglement(
        qubits: 'list[cirq.LineQubit]',
        **kwargs,
    ):
        return cirq.CZ(*qubits)

    def _resolve_measurement(
        qubits: 'list[cirq.LineQubit]',
        **kwargs,
    ):
        angle = kwargs.get('measurement_angle')
        signal_s_qubits = kwargs.get('signal_s_qubits')
        signal_t_qubits = kwargs.get('signal_t_qubits')

        # Here, we should be able to just apply CNOT or CZ with control
        # signal_s/signal_t to the desired qubit.
        ops = []
        if signal_s_qubits:
            for signal_qubit in signal_s_qubits:
                ops.append(cirq.CX(signal_qubit, *qubits))

        if signal_t_qubits:
            for signal_qubit in signal_t_qubits:
                ops.append(cirq.CZ(signal_qubit, *qubits))

        # Uses the custom XBasisMeasurementSignal class to handle
        # measurement.
        gate = XBasisMeasurementSignal(angle).on(*qubits)
        ops.append(gate)
        return ops


class CirqBuilder:
    def to_qasm(self) -> str:
        return self.circuit.to_qasm()

    def to_circuit(self) -> cirq.Circuit:
        return self.circuit


class StrictCirqBuilder(CirqBuilder):
    """Builds a Cirq circuit strictly from a provided MCL program.

    StrictCirqBuilder "strictly" builds circuits by taking commands from
    a serialized MCL program and placing operators at the corresponding
    timestamps. This method DOES NOT provide a valid circuit, but acts
    as the intermediate step.

    StrictCirqBuilder deconstructs all dependent measurements into a
    series of controlled gates, and XBasisMeasurementSignals. In most
    cases, the measurement will occur before the controlled gates, and
    StrictCirqBuilder will incorrectly construct a circuit representing
    this.

    An example of this can be seen in the simple quantum teleportation
    example (found on page 15 of Danos' The Measurement Calculus):

        X_3^(s_2) M_2^(-B) E_23 X_2^(s_1) M_1^(-a) E_12

    This input provides the following strict output:

        1: ───H───@───XRot(0.0)───@───────────────────────
                  │               │
        2: ───H───@───────────────X───@───XRot(0.0)───@───
                                      │               │
        3: ───H───────────────────────@───────────────X───

    The ValidCirqBuilder class uses the output from StrictCirqBuilder to
    construct valid circuits via the deferred measurement principle.
""" CMD_MAP = { 'Prep': OpResolver._resolve_prep, 'XCorrect': OpResolver._resolve_x_correction, 'ZCorrect': OpResolver._resolve_z_correction, 'Entangle': OpResolver._resolve_entanglement, 'Measure': OpResolver._resolve_measurement, } def __init__(self, json_input: str) -> None: self.circuit = cirq.Circuit() self.qubit_map = {} self._convert_json_to_cirq_circuit(json_input) def _convert_json_to_cirq_circuit(self, json_input: str) -> None: """Converts json input string into a Cirq circuit. Args: json_input: the JSON string to be converted. """ for el in json_input: command = next(iter(el)) command_data = el[command] # Gather information about each operation. Assumes command_data # contains maximum amount of information, and leaves determining # what to actually build to the resolver functions. # # All commands are required to have a list of qubits to act on. Every # other item is optional/command depenedent. on_qubits = self._get_qubits_from_command_ints(command_data['on_qubits']) signal_qubits = self._get_qubits_from_command_ints(command_data.get('signals', [])) signal_s_qubits = self._get_qubits_from_command_ints(command_data.get('signal_s', [])) signal_t_qubits = self._get_qubits_from_command_ints(command_data.get('signal_t', [])) measurement_angle = command_data.get('angle', None) # Rather than having a giant if/else clause, we pass everything through # to the resolver and let the functions handle deciidng what to add to # the circuit. # on_qubits is overwritten here to tyoe cirq.LineQubit, which is an exception. if command == 'Prep': qubit_no = command_data['on_qubits'][0] on_qubits = cirq.LineQubit(qubit_no) self.qubit_map[qubit_no] = on_qubits resolver = self.CMD_MAP.get(command) ops = resolver( on_qubits, signal_qubits=signal_qubits, # For X and Z corrections signal_s_qubits=signal_s_qubits, # For measurement signal_t_qubits=signal_t_qubits, # For measurement measurement_angle=measurement_angle # For measurement ) self.circuit.append(ops) def _get_qubits_from_command_ints(self, qubit_list) -> list: cirq_qubits: list[cirq.LineQubit] = [] for qubit in qubit_list: cirq_qubit: cirq.LineQubit = self.qubit_map.get(qubit) cirq_qubits.append(cirq_qubit) return cirq_qubits if len(cirq_qubits) > 0 else None class ValidCirqBuilder(CirqBuilder): """ ValidCirqBuilder implements the deferred measurement principle and adds measurements to the circuit based on the locations of the XBasisMeasurementSignal objects. 
To expand on the example in StrictCirqBuilder, the strict circuit: 1: ───H───@───XRot(0.0)───@─────────────────────── │ │ 2: ───H───@───────────────X───@───XRot(0.0)───@─── │ │ 3: ───H───────────────────────@───────────────X─── is converted to the following valid circuit: 1: ───H───@───────@───XRot(0.0)───M─────────────────────── │ │ 2: ───H───@───────X───@───────────────@───XRot(0.0)───M─── │ │ 3: ───H───────────────@───────────────X─────────────────── """ def __init__(self, strict_cirq_circuit): self.circuit = strict_cirq_circuit def _remove_measurements_from_circuit(self): measurement_tuples = [] measurement_operations = [] for i, moment in enumerate(self.circuit): for operation in moment.operations: if isinstance(operation.gate, XBasisMeasurementSignal): measurement_tuples.append((i, operation)) measurement_operations.append(operation) self.circuit.batch_remove(measurement_tuples) return measurement_operations def rearrange_measurement_signals(self): """Rearranges measurements by finding the location of each measurement operator in the circuit, removing it, and then appending it to the end. """ measurement_operations = self._remove_measurements_from_circuit() # Store a copy of the stripped_circuit self.stripped_circuit = self.circuit.copy() self.circuit.append(measurement_operations) def append_final_measurements(self): for i, moment in enumerate(self.circuit): for operation in moment.operations: if isinstance(operation.gate, XBasisMeasurementSignal): self.circuit.append(cirq.measure(*operation.qubits)) def main(): stdin = '' ## Reading from stdin for line in sys.stdin: # Just concatenate every line since # we're only expecting a single string stdin += line loaded_program = json.loads(stdin) print(loaded_program) strict_circuit = StrictCirqBuilder(loaded_program) print(strict_circuit.to_circuit()) valid_circuit = ValidCirqBuilder(strict_circuit.to_circuit()) valid_circuit.rearrange_measurement_signals() valid_circuit.append_final_measurements() print(valid_circuit.to_circuit()) print(valid_circuit.to_qasm()) simulator = cirq.Simulator() result = simulator.simulate(valid_circuit.circuit) print(result) if __name__ == '__main__': main()
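The moment-level surgery ValidCirqBuilder performs can be isolated in a few lines. The sketch below uses an invented toy marker gate rather than the MCL pipeline, but exercises the same Cirq calls: iterate moments, collect (moment_index, operation) pairs, batch_remove them, then re-append the collected operations at the end of the circuit.

import cirq

class Marker(cirq.Gate):
    """Toy stand-in for XBasisMeasurementSignal."""
    def _num_qubits_(self):
        return 1
    def _circuit_diagram_info_(self, args):
        return "Mark"

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.H(q0), Marker().on(q0), cirq.CZ(q0, q1)])

to_remove = []
moved = []
for i, moment in enumerate(circuit):
    for op in moment.operations:
        if isinstance(op.gate, Marker):
            to_remove.append((i, op))
            moved.append(op)
circuit.batch_remove(to_remove)
circuit.append(moved)  # the marker now sits after the entangling gate
print(circuit)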
9,250
31.233449
92
py
mcbeth
mcbeth-master/mcbeth/python/ZXBuilder.py
import pyzx as zx
import sys
import json

"""NOTE: ZXBuilder class should be read-only now.
Using Cirq to handle all QASM input and output should suffice,
and we can read in that QASM input to build ZX diagrams.
"""

class ZXBuilder:
    CMD_MAP = {
        'Prep': zx.gates.HAD,
        'XCorrect': zx.gates.NOT,
        'ZCorrect': zx.gates.Z,
        'Entangle': zx.gates.CZ,
    }

    def __init__(self, json_input: str) -> None:
        self.graph = zx.Graph()
        self._convert_json_to_zx_graph(json_input)

    def to_qasm(self) -> str:
        circuit = self.to_circuit()
        return circuit.to_qasm()

    def to_circuit(self):
        return zx.extract_circuit(self.graph)

    def _convert_json_to_zx_graph(self, json_input: str) -> None:
        """Converts json input string into a ZX graph data structure.

        Args:
            json_input: the JSON string to be converted.
        """
        for el in json_input:
            # Turns the dictionary into an iterable,
            # and gets the first value.
            command = next(iter(el))
            command_data = el[command]

            if command == 'Prep':
                # We know that the first and only element of the qubit list
                # is the qubit index.
                self.graph.add_vertex(qubit=command_data['on_qubits'][0])

            op = self.CMD_MAP.get(command)
            # Overwrites the current graph each time it's run.
            self.graph = self._add_op_to_graph(op(*command_data['on_qubits']), self.graph)

    def _add_op_to_graph(self, operation: zx.gates.Gate, graph: zx.Graph) -> zx.Graph:
        """Adds an operation to a PyZX graph by converting to circuit form,
        adding the operation to the circuit, then transitioning back to graph
        form.

        Args:
            operation: the operation to add to the graph. Note that for
                multi-qubit gates, the control qubit(s) should be provided at
                the beginning of the operation's qubit list.
            graph: the graph to add the operation to.

        Returns:
            The overwritten graph with the operation added to it.
        """
        circuit = zx.extract_circuit(graph)
        circuit.add_gate(operation)
        returned_graph = circuit.to_graph()
        return returned_graph


def main():
    stdin = ''
    ## Reading from stdin
    for line in sys.stdin:
        # Just concatenate every line since
        # we're only expecting a single string
        stdin += line

    loaded_program = json.loads(stdin)
    print(loaded_program)

    zx_graph = ZXBuilder(loaded_program)
    qasm = zx_graph.to_qasm()
    print(qasm)

if __name__ == '__main__':
    main()
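The builder leans on pyzx's circuit/graph round trip. Below is a minimal sketch of that loop, assuming a recent pyzx where gates can be added by name; it is an illustration of the conversion pattern, not the builder's own code path.

import pyzx as zx

circuit = zx.Circuit(2)                # two-qubit circuit
circuit.add_gate("HAD", 0)             # Hadamard on qubit 0
circuit.add_gate("CZ", 0, 1)           # CZ between qubits 0 and 1
graph = circuit.to_graph()             # ZX-diagram form
extracted = zx.extract_circuit(graph)  # back to circuit form
print(extracted.to_qasm())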
2,538
27.52809
84
py
mcbeth
mcbeth-master/mcbeth/python/mcl/__init__.py
import os import sys import shutil # Rough way to add to the Python path. # TODO: find a cleaner way to implement this. You have to switch # directories currently to find it. for src in ["ocaml.so", "../_build/default/python/ocaml.so"]: if os.path.exists(src): shutil.copyfile(src, "mcl/ocaml.so") sys.path.append(".") ## Enables the shared object file to be understood by Python from ctypes import PyDLL, RTLD_GLOBAL, c_char_p curdir = dir_path = os.path.dirname(os.path.realpath(__file__)) dll = PyDLL(f"{curdir}/ocaml.so", RTLD_GLOBAL) argv_t = c_char_p * 2 argv = argv_t("ocaml.so".encode('utf-8'), None) dll.caml_startup(argv) # Import relevant files from ZXBuilder import ZXBuilder from CirqBuilder import StrictCirqBuilder, ValidCirqBuilder
757
30.583333
64
py
marmot
marmot-master/marmot/__init__.py
0
0
0
py
marmot
marmot-master/marmot/evaluation/evaluation_metrics.py
from __future__ import division # return the f1 for (y_predicted, y_actual) # use sklearn.metrics.f1_score with average='weighted' for evaluation from sklearn.metrics import f1_score, accuracy_score import logging import numpy as np from marmot.experiment.import_utils import list_of_lists from marmot.experiment.preprocessing_utils import flatten logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) logger = logging.getLogger('experiment_logger') def weighted_fmeasure(y_true, y_pred): return f1_score(y_true, y_pred, average='weighted', pos_label=None) # each span is a pair (span start, span end). span end = last span element + 1 def get_spans(sentence, good_label=1, bad_label=0): good_spans, bad_spans = [], [] prev_label = None cur_start = 0 for idx, label in enumerate(sentence): if label == good_label: if label != prev_label: if prev_label is not None: bad_spans.append((cur_start, idx)) cur_start = idx elif label == bad_label: if label != prev_label: if prev_label is not None: good_spans.append((cur_start, idx)) cur_start = idx else: print("Unknown label", label) prev_label = label # add last span if prev_label == good_label: good_spans.append((cur_start, len(sentence))) else: bad_spans.append((cur_start, len(sentence))) return(good_spans, bad_spans) def intersect_spans(true_span, pred_span): # connectivity matrix for all pairs of spans from the reference and prediction connections = [[max(0, min(t_end, p_end) - max(t_start, p_start)) for (p_start, p_end) in pred_span] for (t_start, t_end) in true_span] adjacency = np.array(connections) res = 0 # while there are non-zero elements == there are unused spans while adjacency.any(): # maximum intersection max_el = adjacency.max() max_coord = adjacency.argmax() # coordinates of the max element coord_x, coord_y = max_coord // adjacency.shape[1], max_coord % adjacency.shape[1] res += max_el # remove all conflicting edges for i in range(adjacency.shape[0]): adjacency[i][coord_y] = 0 for i in range(adjacency.shape[1]): adjacency[coord_x][i] = 0 return res # Y_true and y_pred - lists of sequences #def sequence_correlation(y_true, y_pred, good_label=1, bad_label=0): # assert(len(y_true) == len(y_pred)) # if not list_of_lists(y_true) and not list_of_lists(y_pred): # logger.warning("You provided the labels in a flat list of length {}. Assuming them to be one sequence".format(len(y_true))) # y_true = [y_true] # y_pred = [y_pred] # elif list_of_lists(y_true) and list_of_lists(y_pred): # pass # else: # logger.error("Shapes of the hypothesis and the reference don't match") # return 0 # # sentence_pred = [] # for true_sent, pred_sent in zip(y_true, y_pred): # assert(len(true_sent) == len(pred_sent)) # true_spans_1, true_spans_0 = get_spans(true_sent, good_label=good_label, bad_label=bad_label) # pred_spans_1, pred_spans_0 = get_spans(pred_sent, good_label=good_label, bad_label=bad_label) # # res_1 = intersect_spans(true_spans_1, pred_spans_1) # res_0 = intersect_spans(true_spans_0, pred_spans_0) # # sentence_pred.append((res_1+res_0)/len(true_sent)) # # return sentence_pred, np.average(sentence_pred) # Y_true and y_pred - lists of sequences def sequence_correlation(y_true, y_pred, good_label=1, bad_label=0, out='sequence_corr.out', verbose=False): assert(len(y_true) == len(y_pred)) if not list_of_lists(y_true) and not list_of_lists(y_pred): logger.warning("You provided the labels in a flat list of length {}. 
Assuming them to be one sequence".format(len(y_true))) y_true = [y_true] y_pred = [y_pred] elif list_of_lists(y_true) and list_of_lists(y_pred): pass else: logger.error("Shapes of the hypothesis and the reference don't match") return 0 sentence_pred = [] if verbose: out_file = open(out, 'w') for true_sent, pred_sent in zip(y_true, y_pred): assert(len(true_sent) == len(pred_sent)) true_spans_1, true_spans_0 = get_spans(true_sent, good_label=good_label, bad_label=bad_label) pred_spans_1, pred_spans_0 = get_spans(pred_sent, good_label=good_label, bad_label=bad_label) res_1 = intersect_spans(true_spans_1, pred_spans_1) res_0 = intersect_spans(true_spans_0, pred_spans_0) corr_val = (res_1+res_0)/float(len(true_sent)) # print(corr_val, type(corr_val)) if verbose: out_file.write("Reference: %s\nPrediction: %s\nCorrelation: %s\n" % (' '.join([str(t) for t in true_sent]), ' '.join([str(t) for t in pred_sent]), str(corr_val))) sentence_pred.append(corr_val) if verbose: out_file.close() return sentence_pred, np.average(sentence_pred) def sequence_correlation_weighted(y_true, y_pred, good_label=1, bad_label=0, out='sequence_corr.out', verbose=False): assert(len(y_true) == len(y_pred)) if not list_of_lists(y_true) and not list_of_lists(y_pred): logger.warning("You provided the labels in a flat list of length {}. Assuming them to be one sequence".format(len(y_true))) y_true = [y_true] y_pred = [y_pred] elif list_of_lists(y_true) and list_of_lists(y_pred): pass else: logger.error("Shapes of the hypothesis and the reference don't match") return 0 sentence_pred = [] if verbose: out_file = open(out, 'w') for true_sent, pred_sent in zip(y_true, y_pred): ref_bad = sum([1 for l in true_sent if l == bad_label]) ref_good = sum([1 for l in true_sent if l == good_label]) assert(ref_bad + ref_good == len(true_sent)) # coefficients that ensure the equal influence of good and bad classes on the overall score try: coeff_bad = len(true_sent)/(2*ref_bad) except ZeroDivisionError: coeff_bad = 0.0 try: coeff_good = len(true_sent)/(2*ref_good) except ZeroDivisionError: coeff_good = 0.0 assert(len(true_sent) == len(pred_sent)) true_spans_1, true_spans_0 = get_spans(true_sent, good_label=good_label, bad_label=bad_label) pred_spans_1, pred_spans_0 = get_spans(pred_sent, good_label=good_label, bad_label=bad_label) res_1 = intersect_spans(true_spans_1, pred_spans_1) res_0 = intersect_spans(true_spans_0, pred_spans_0) len_t_1, len_t_0 = len(true_spans_1), len(true_spans_0) len_p_1, len_p_0 = len(pred_spans_1), len(pred_spans_0) if len_t_1 + len_t_0 > len_p_1 + len_p_0: spans_ratio = (len_p_1 + len_p_0)/(len_t_1 + len_t_0) else: spans_ratio = (len_t_1 + len_t_0)/(len_p_1 + len_p_0) corr_val = (res_1*coeff_good + res_0*coeff_bad)*spans_ratio/float(len(true_sent)) # try: # corr_val = res_0/float(ref_bad) # except ZeroDivisionError: # corr_val = 1.0 # print(corr_val, type(corr_val)) if verbose: out_file.write("Reference: %s\nPrediction: %s\nCorrelation: %s\n" % (' '.join([str(t) for t in true_sent]), ' '.join([str(t) for t in pred_sent]), str(corr_val))) sentence_pred.append(corr_val) if verbose: out_file.close() return sentence_pred, np.average(sentence_pred) # sequence correlation based on full (not restricted) accuracy score # accuracy score weighted by the importance of tags times ratio of numbers of spans in the hypothesis and the reference def sequence_correlation_simple(true_tags, test_tags): seq_corr_all = [] for true_seq, test_seq in zip(true_tags, test_tags): n_spans_1_true, n_spans_0_true = 0, 0 n_spans_pred = 0 prev_true 
= None for tag in true_seq: if tag == 1 and prev_true == 0: n_spans_0_true += 1 elif tag == 0 and prev_true == 1: n_spans_1_true += 1 prev_true = tag if true_seq[-1] == 0: n_spans_0_true += 1 elif true_seq[-1] == 1: n_spans_1_true += 1 prev_pred = None for tag in test_seq: if tag != prev_pred: n_spans_pred += 1 prev_pred = tag n_spans_pred -= 1 lambda_0 = len(test_tags)/n_spans_0_true if n_spans_0_true != 0 else 0 lambda_1 = len(test_tags)/n_spans_1_true if n_spans_1_true != 0 else 0 weights = [] for t in true_seq: if t == 1: weights.append(lambda_1) elif t == 0: weights.append(lambda_0) else: print("Unknown reference tag: {}".format(t)) assert(len(weights) == len(true_seq)), "Expected weights array len {}, got {}".format(len(weights), len(true_tags)) acc = accuracy_score(true_seq, test_seq, sample_weight=weights) # penalises any difference in the number of spans between the reference and the hypothesis n_spans_true = n_spans_1_true + n_spans_0_true - 1 if n_spans_true == 0 and n_spans_pred == 0: seq_corr_all.append(1) else: if n_spans_true == 0 or n_spans_pred == 0: seq_corr_all.append(0) else: ratio = min(n_spans_pred/n_spans_true, n_spans_true/n_spans_pred) seq_corr_all.append(acc*ratio) return seq_corr_all, np.average(seq_corr_all) def cohens_kappa(true_tags, test_tags, verbose=False): # true positive, true negative, false positive, false negative tp, tn, fp, fn = 0, 0, 0, 0 flat_true = flatten(true_tags) flat_test = flatten(test_tags) n_tags = len(flat_true) for true, test in zip(flat_true, flat_test): if true == 0: if test == 0: tn += 1 elif test == 1: fp += 1 elif true == 1: if test == 0: fn += 1 elif test == 1: tp += 1 # observed agreement, expected agreement po = (tp + tn)/n_tags prob_1_true = (tp + fn)/n_tags prob_1_test = (tp + fp)/n_tags pe = prob_1_true*prob_1_test + (1 - prob_1_true)*(1 - prob_1_test) if verbose: print(''' Pred | | | OK | BAD True | | --------------------------- OK | %d | %d --------------------------- BAD | %d | %d --------------------------- ''' % (tp, fn, fp, tn)) print("Tp %d, fp %d, tn %d, fn %d" % (tp, fp, tn, fn)) return (po - pe)/(1 - pe)
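A worked toy example helps pin down the span conventions used throughout this module: spans are (start, end) pairs with an exclusive end, and intersect_spans credits each reference span with its largest non-conflicting overlap. The hand trace below recomputes one overlap with the same expression intersect_spans uses; the tag sequence is invented.

# Tag sequence: 1 = good, 0 = bad.
sentence = [1, 1, 0, 0, 1]

# Re-tracing get_spans by hand:
#   positions 0-1 are good  -> good span (0, 2)
#   positions 2-3 are bad   -> bad span  (2, 4)
#   position  4   is good   -> good span (4, 5)
good_spans = [(0, 2), (4, 5)]
bad_spans = [(2, 4)]

# Overlap credit for one (true, predicted) span pair, as in intersect_spans:
t_start, t_end = (0, 2)
p_start, p_end = (1, 3)
overlap = max(0, min(t_end, p_end) - max(t_start, p_start))
print(overlap)  # 1: only position 1 is shared by the two spans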
11,031
39.410256
175
py
marmot
marmot-master/marmot/evaluation/evaluate.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function, division import sys, codecs import numpy as np from sklearn import metrics from marmot.evaluation.evaluation_metrics import weighted_fmeasure import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) logger = logging.getLogger('testlogger') issues_accuracy = ['Terminology', 'Mistranslation', 'Omission', 'Addition', 'Untranslated', 'Accuracy'] issues_fluency = ['Agreement', 'Capitalization', 'Fluency', 'Function_words', 'Grammar', 'Morphology_(word_form)', 'Style/register', 'Typography', 'Unintelligible', 'Word_order', 'Tense/aspect/mood', 'Punctuation', 'Spelling', 'Part_of_speech'] def flatten(lofl): return [item for sublist in lofl for item in sublist] def read_wmt_annotation(f): anno = {} for line_number, line in enumerate(f): line = line.decode('utf-8').strip().split() assert len(line) == 6, "line %d: expected 6 elements per line but found %d\n" %(line_number, len(line)) sid, wid, w, a1, a2, a3 = line sid = sid.split('.') assert len(sid) == 2 and sid[0].isdigit() and sid[1].isdigit(), \ "line %d: first entry (sentence id) must be if format X.Y\n" %line_number assert wid.isdigit(), "line %d: second entry (word index) must be integer\n" %line_number sid = (int(sid[0]), int(sid[1])) wid = int(wid) assert a1.lower() == "ok" or \ a1 in issues_accuracy or \ a1.lower() in map(str.lower, issues_accuracy) or \ a1 in issues_fluency or \ a1.lower() in map(str.lower, issues_fluency), \ "line %d: unexpected error category %s\n" %(line_number, a1) assert a2.lower() in ['ok', 'fluency', 'accuracy'], "line %d: unexpected error category %s\n" %(line_number, a2) assert a3.lower() in ['ok', 'bad'], "line %d: unexpected error category %s\n" %(line_number, a3) if not sid in anno: anno[sid] = {} assert not wid in anno[sid], "line %d: duplicate entry for s%d:w%d" %(line_number, sid, wid) anno[sid][wid] = [a1.lower(), a2.lower(), a3.lower(), w] return anno def generate_random_with_prior(ref_list, options): prior_probs = [float(ref_list.count(opt))/len(ref_list) for opt in options] rand_list = [options[np.random.multinomial(1, prior_probs).argmax()] for i in range(len(ref_list))] return rand_list #print confusion matrix def print_cf(cf, name, options, f1_scores, weighted_f1): print("----- Results for %s: -----" %name) print("-------------------------------------") print("\t\tPREDICT") print("REFERENCE\t", "\t".join(options)) for linenr, line in enumerate(cf): print("%s\t\t" %options[linenr]) print("\t".join(map(str,line))) print("-------------------------------------") for i in range(len(options)): print("F1 %24s: %f" %(options[i], f1_scores[i])) print(" %24s: %f" %("WEIGHTED AVG", weighted_f1)) print("-------------------------------------") #get scores and confusion matrix #ref, hyp - lists of labels def get_scores(ref, hyp, labels, name='default name', mute=0): assert(all([r in labels for r in ref])) assert(all([h in labels for h in hyp])) assert(len(ref) == len(hyp)) label_list = list(labels) weighted_f1 = metrics.f1_score(ref, hyp, labels=label_list, average='weighted', pos_label=None) if not mute: cf_matrix = metrics.confusion_matrix(ref, hyp, labels=label_list) f1_scores = metrics.f1_score(ref, hyp, labels=label_list, average=None, pos_label=None) print_cf(cf_matrix, name, label_list, f1_scores, weighted_f1) return weighted_f1 #return list of labels for every example # TODO: change the output format of the wmt parser above, this is messing everything up! 
- we should have dicts containing the annotation data
def choose_wmt_token_subset(anno, tok_list=None):
    #use all words
    if tok_list is None:
        return [anno[sid][wid][-2] for sid in anno for wid in anno[sid]]
    #use only words from tok_list
    else:
        # currently the index of the token in the annotation is -1, the coarse-grained annotation is at i = -2
        return [anno[sid][wid][-2] for sid in anno for wid in anno[sid] if anno[sid][wid][-1] in tok_list]


def significance_test(ref, hyp_res, options, granularity=20):
    options = list(options)
    assert type(hyp_res) != list, 'the performance on the hypothesis should be a float in the range: [0.0,1.0]'
    res_random = []
    for i in range(granularity):
        rand = generate_random_with_prior(ref, options)
        res_random.append(get_scores(ref, rand, options, str(i), mute=1))

    numerator = len([res for res in res_random if hyp_res <= res])
    if numerator == 0:
        numerator = 1
    p_value = numerator / granularity
    if p_value <= 0.05:
        print("The result is statistically significant with p = {}".format(p_value))
    else:
        print("The result is not statistically significant: {}".format(p_value))
    return p_value


# evaluate predicted and actual hashed token instances
def evaluate_hashed_predictions(ref, hyp, labels):
    ref_keys = ref.keys()
    for tok in hyp.keys():
        assert tok in ref_keys, 'The reference dict must contain the token'
        assert len(ref[tok]) == len(hyp[tok]), 'the dicts must contain the same number of instances for each token'

    label_list = set(labels)
    result_map = {}
    for tok, predicted in hyp.iteritems():
        actual = ref[tok]
        logger.info("\ttotal instances: " + str(len(predicted)))
        logger.info("Evaluating results for token = " + tok)
        hyp_res = get_scores(actual, predicted, label_list, '\''+tok+'\'')
        token_p_value = significance_test(actual, hyp_res, label_list)
        token_result = {'token': tok, 'weighted_f1': hyp_res, 'p_value': token_p_value}
        result_map[tok] = token_result

    return result_map


# assert that the keys are the same (see experiment_utils.sync)
# evaluate wmt formatted parallel files
def evaluate_wmt(anno_ref, anno_hyp, interesting_words=[]):
    option_list = ['ok', 'bad']

    # {'token': <token>, 'weighted_f1': <weighted_f1>, 'p_value': <p_value>}
    evaluation_results = {'token_level': [], 'all_data': {}}

    #scores and confusion matrices for individual words
    for tok in interesting_words:
        # choose_token_subset maps into [<tag>]
        ref_list = choose_wmt_token_subset(anno_ref, tok_list=[tok])
        hyp_list = choose_wmt_token_subset(anno_hyp, tok_list=[tok])
        hyp_res = get_scores(ref_list, hyp_list, option_list, '\''+tok+'\'')
        token_p_value = significance_test(ref_list, hyp_res, option_list)
        # {'token': <token>, 'weighted_f1': <weighted_f1>, 'p_value': <p_value>}
        token_result = {'token': tok, 'weighted_f1': hyp_res, 'p_value': token_p_value}
        evaluation_results['token_level'].append(token_result)

    #scores for all interesting words or for all words if interesting_words not specified
    ref_list = choose_wmt_token_subset(anno_ref, tok_list=None)
    hyp_list = choose_wmt_token_subset(anno_hyp, tok_list=None)
    overall_result = get_scores(ref_list, hyp_list, option_list, 'all_words')
    p_value = significance_test(ref_list, overall_result, option_list)
    result_obj = {'weighted_f1': overall_result, 'p_value': p_value}
    evaluation_results['all_data'] = result_obj

    return evaluation_results


#evaluate
def main(file_ref, file_hyp, words_file):
    ref = read_wmt_annotation(open(file_ref))
    hyp = read_wmt_annotation(open(file_hyp))
    interesting_words = [] if words_file == "" else [line[:-1].decode('utf-8') for line in open(words_file)]
evaluate_wmt(ref, hyp, interesting_words) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('ref', help="reference annotations") parser.add_argument('sub', help="submission annotations") parser.add_argument('--token_subset', help="subset of tokens to evaluate") args = parser.parse_args(sys.argv[1:]) main(args.ref, args.sub, args.token_subset if args.token_subset else "")
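The significance test above hinges on generate_random_with_prior: random labels drawn with the reference class distribution give the chance level a real system has to beat. The snippet below reruns that sampling step in isolation on invented toy labels.

import numpy as np

ref_list = ['ok'] * 8 + ['bad'] * 2
options = ['ok', 'bad']

# Observed class distribution of the reference labels.
prior_probs = [ref_list.count(opt) / float(len(ref_list)) for opt in options]
# One multinomial draw per position, preserving the class balance on average.
rand_list = [options[np.random.multinomial(1, prior_probs).argmax()]
             for _ in range(len(ref_list))]
print(prior_probs)  # [0.8, 0.2]
print(rand_list)    # random labels with roughly the same class balance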
8,375
41.30303
142
py
marmot
marmot-master/marmot/evaluation/evaluate_task2_WMT2014.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys, codecs import numpy as np issues_accuracy = ['Terminology', 'Mistranslation', 'Omission', 'Addition', 'Untranslated', 'Accuracy'] issues_fluency = ['Agreement', 'Capitalization', 'Fluency', 'Function_words', 'Grammar', 'Morphology_(word_form)', 'Style/register', 'Typography', 'Unintelligible', 'Word_order', 'Tense/aspect/mood', 'Punctuation', 'Spelling', 'Part_of_speech'] def read_annotation(f): anno = {} for line_number, line in enumerate(f): line = line.strip().split() assert len(line) == 6, "line %d: expected 6 elements per line but found %d\n" %(line_number, len(line)) sid, wid, w, a1, a2, a3 = line sid = sid.split('.') assert len(sid) == 2 and sid[0].isdigit() and sid[1].isdigit(), \ "line %d: first entry (sentence id) must be if format X.Y\n" %line_number assert wid.isdigit(), "line %d: second entry (word index) must be integer\n" %line_number sid = (int(sid[0]), int(sid[1])) wid = int(wid) assert a1.lower() == "ok" or \ a1 in issues_accuracy or \ a1.lower() in map(str.lower, issues_accuracy) or \ a1 in issues_fluency or \ a1.lower() in map(str.lower, issues_fluency), \ "line %d: unexpected error category %s\n" %(line_number, a1) assert a2.lower() in ['ok', 'fluency', 'accuracy'], "line %d: unexpected error category %s\n" %(line_number, a2) assert a3.lower() in ['ok', 'bad'], "line %d: unexpected error category %s\n" %(line_number, a3) if not sid in anno: anno[sid] = {} assert not wid in anno[sid], "line %d: duplicate entry for s%d:w%d" %(line_number, sid, wid) anno[sid][wid] = [a1.lower(), a2.lower(), a3.lower(), w] return anno def compare_anno(anno1, anno2): for sid in anno1: #print(sid) assert sid in anno2, "s%d only found in one file\n" %(sid) for wid in anno1[sid]: assert wid in anno2[sid], "s%d:w%d only found in one file\n" %(sid, wid) def get_precision(tp, fp): if tp > 0: return float(tp)/(tp+fp) return 0. def get_recall(tp, fn): if tp > 0: return float(tp)/(tp+fn) return 0. def get_f1(tp, fn, fp): precision = get_precision(tp, fp) recall = get_recall(tp, fn) f1 = 0. if precision * recall > 0: f1 = 2. * precision*recall/(precision+recall) return f1 def matrix(n): return [[0]*n for i in range(n)] def eval_sub_mute(anno1, anno2, idx, options, name): options = map(str.lower, options) short_options = [o[:7] for o in options] cf = matrix(len(options)) for sid in anno1: for wid in anno1[sid]: r = anno1[sid][wid][idx] r = options.index(r) p = anno2[sid][wid][idx] p = options.index(p) cf[p][r] += 1 weighted_average_f1 = 0. norm = 0 for i in range(len(options)): tn, tp, fn, fp = 0.,0.,0.,0. tp = cf[i][i] fp = sum(cf[i]) - tp fn = sum(l[i] for l in cf) - tp f1 = get_f1(tp, fn, fp) if i != options.index('ok'): weighted_average_f1 += f1 * (tp + fn) norm += tp + fn return weighted_average_f1/norm def eval_sub(anno1, anno2, idx, options, name): options = map(str.lower, options) short_options = [o[:7] for o in options] cf = matrix(len(options)) for sid in anno1: for wid in anno1[sid]: r = anno1[sid][wid][idx] r = options.index(r) p = anno2[sid][wid][idx] p = options.index(p) cf[p][r] += 1 print "----- Results for %s: -----" %name print "-------------------------------------" print "\tREFERENCE" print "PREDICT\t", "\t".join(short_options) for linenr, line in enumerate(cf): print "%s\t" %short_options[linenr], print "\t".join(map(str,line)) print "-------------------------------------" weighted_average_f1 = 0. norm = 0 for i in range(len(options)): print('i is: ' + str(i)) tn, tp, fn, fp = 0.,0.,0.,0. 
tp = cf[i][i] fp = sum(cf[i]) - tp fn = sum(l[i] for l in cf) - tp f1 = get_f1(tp, fn, fp) # Chris: this line is NOT the weighted average for the binary task if i != options.index('ok'): weighted_average_f1 += f1 * (tp + fn) norm += tp + fn print "F1 %24s: %f" %(options[i], f1) print "\n %24s: %f" %("WEIGHTED AVG", weighted_average_f1/norm) print "-------------------------------------" return weighted_average_f1/norm # Chris - working - eval only by the words we care about def eval_submission_subset(anno1, anno2, idx, options, name, token_set): options = map(str.lower, options) short_options = [o[:7] for o in options] cf = matrix(len(options)) for sid in anno1: for wid in anno1[sid]: word = anno1[sid][wid][-1].decode('utf8') #print('checking: ' + word) if word[:word.find('_')] in token_set: #print(anno1[sid][wid][-1] + ' is in token_set') r = anno1[sid][wid][idx] r = options.index(r) p = anno2[sid][wid][idx] p = options.index(p) cf[p][r] += 1 print "----- Results for %s: -----" %name print "-------------------------------------" print "\tREFERENCE" print "PREDICT\t", "\t".join(short_options) for linenr, line in enumerate(cf): print "%s\t" %short_options[linenr], print "\t".join(map(str,line)) print "-------------------------------------" weighted_average_f1 = 0. norm = 0 for i in range(len(options)): tn, tp, fn, fp = 0.,0.,0.,0. tp = cf[i][i] fp = sum(cf[i]) - tp fn = sum(l[i] for l in cf) - tp f1 = get_f1(tp, fn, fp) # Chris - for the binary case, this only gives f1 for the negative class # if i != options.index('ok'): weighted_average_f1 += f1 * (tp + fn) norm += tp + fn print "F1 %24s: %f" %(options[i], f1) print "\n %24s: %f" %("WEIGHTED AVG", weighted_average_f1/norm) print "-------------------------------------" return weighted_average_f1/norm def eval_submission_subset_mute(anno1, anno2, idx, options, name, token_set): options = map(str.lower, options) short_options = [o[:7] for o in options] cf = matrix(len(options)) for sid in anno1: for wid in anno1[sid]: word = anno1[sid][wid][-1].decode('utf8') if word[:word.find('_')] in token_set: r = anno1[sid][wid][idx] r = options.index(r) p = anno2[sid][wid][idx] p = options.index(p) cf[p][r] += 1 weighted_average_f1 = 0. norm = 0 for i in range(len(options)): tn, tp, fn, fp = 0.,0.,0.,0. 
tp = cf[i][i] fp = sum(cf[i]) - tp fn = sum(l[i] for l in cf) - tp f1 = get_f1(tp, fn, fp) # Chris - for the binary case, this only gives f1 for the negative class # if i != options.index('ok'): weighted_average_f1 += f1 * (tp + fn) norm += tp + fn return weighted_average_f1/norm def eval_a1(anno1, anno2): options = ["ok"] + issues_fluency + issues_accuracy eval_sub(anno1, anno2, 0, options, "multiclass") def eval_a2(anno1, anno2): options = ['ok', 'fluency', 'accuracy'] #OPTIONS = map(str.lower, issues_fluency + issues_accuracy + ["OK"]) eval_sub(anno1, anno2, 1, options, "3-class") def eval_a3_subset(anno1, anno2, token_subset): options = ['ok', 'bad'] relevant_toks = set(codecs.open(token_subset, encoding='utf8').read().split('\n')) print(relevant_toks) eval_submission_subset(anno1, anno2, 2, options, "binary", relevant_toks) def eval_a3(anno1, anno2): options = ['ok', 'bad'] eval_sub(anno1, anno2, 2, options, "binary") def eval_a3_significance(anno1, anno2): options = ['ok', 'bad'] res = eval_sub(anno1, anno2, 2, options, "binary") rand_res = [] for i in range(20): rand_anno = generate_random(anno1, 2, options) rand_res.append( eval_sub(anno1, rand_anno, 2, options, 'binary') ) print rand_res if all( [max(0,res-i) for i in rand_res] ): print "Statistically significant with p = 0.05" else: print "The result is not statistically significant" def eval_a3_subset_significance(anno1, anno2, token_subset): options = ['ok', 'bad'] relevant_toks = set(codecs.open(token_subset, encoding='utf8').read().split('\n')) print(relevant_toks) res = eval_submission_subset(anno1, anno2, 2, options, "binary", relevant_toks) rand_res = [] for i in range(20): rand_anno = generate_random(anno1, 2, options) rand_res.append( eval_submission_subset_mute(anno1, rand_anno, 2, options, 'binary', relevant_toks) ) print rand_res if all( [max(0,res-i) for i in rand_res] ): print "Statistically significant with p = 0.05" else: print "The result is not statistically significant" #Varvara - generate random annotations for <idx> column #Is it better to generate annotations separately for every type, or to generate only one fine-grained? def generate_random(anno, idx, options): rand_anno = {} probs = np.zeros(len(options)) for sid in anno: for wid in anno[sid]: cur_val = options.index( anno[sid][wid][idx] ) probs[cur_val] += 1.0 #observed distribution probs = probs/sum([len(anno[i]) for i in anno]) for sid in anno: if sid not in rand_anno: rand_anno[sid] = {} for wid in anno[sid]: rand_anno[sid][wid] = anno[sid][wid][:idx]+[options[np.random.multinomial(1,probs).argmax()]]+anno[sid][wid][idx+1:] return rand_anno if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('ref', help="reference annotations") parser.add_argument('sub', help="submission annotations") parser.add_argument('--token_subset', help="subset of tokens to evaluate") args = parser.parse_args(sys.argv[1:]) ref = read_annotation(open(args.ref)) submission = read_annotation(open(args.sub)) token_subset = "" if args.token_subset: token_subset = args.token_subset compare_anno(ref, submission) compare_anno(submission, ref) # Chris: other evaluation types commented out, since we currently only care about GOOD / BAD # eval_a1(ref, submission) # eval_a2(ref, submission) if token_subset: eval_a3_subset_significance(ref, submission, token_subset) else: eval_a3_significance(ref, submission)
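The support-weighted F1 used throughout this script reduces to a short loop over the confusion matrix, which is indexed cf[predicted][reference]; note that eval_sub additionally skips the 'ok' class when accumulating the numerator, a restriction omitted below. This is a Python 3 toy recomputation with an invented matrix, not a replacement for the script's Python 2 code.

cf = [[50, 10],   # cf[predicted][reference]
      [5, 35]]
options = ['ok', 'bad']

weighted_average_f1 = 0.0
norm = 0
for i in range(len(options)):
    tp = cf[i][i]                          # diagonal: correct predictions
    fp = sum(cf[i]) - tp                   # predicted as class i, but wrong
    fn = sum(row[i] for row in cf) - tp    # class i in reference, missed
    precision = tp / (tp + fp) if tp else 0.0
    recall = tp / (tp + fn) if tp else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision * recall else 0.0
    weighted_average_f1 += f1 * (tp + fn)  # weight each class by its support
    norm += tp + fn
print(weighted_average_f1 / norm)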
10,974
34.633117
128
py
marmot
marmot-master/marmot/evaluation/evaluate_task1_WMT2016.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
evaluate_wmt16_task1
~~~~~~~~~~~~~~~~~~~~
Evaluation script for Task 1 of the WMT Quality Estimation challenge.

:copyright: (c) 2016 by Fabio Kepler
:licence: MIT

Usage:
    evaluate_wmt16_task1 [options] REFERENCE_FILE SUBMISSION_FILE...
    evaluate_wmt16_task1 (-h | --help | --version)

Arguments:
    REFERENCE_FILE      path to a reference file in either a tab-separated format
                        <METHOD NAME> <SEGMENT NUMBER> <SEGMENT SCORE> <SEGMENT RANK>
                        or with one HTER score per line; format will be detected based on the first line
    SUBMISSION_FILE...  list of submission files with the same format options as REFERENCE_FILE

Options:
    -s --scale FACTOR   FACTOR by which to scale (multiply) input scores
    -v --verbose        log debug messages
    -q --quiet          log only warning and error messages

Other:
    -h --help           show this help message and exit
    --version           show version and exit
"""
import logging
import numpy as np
import sklearn.metrics as sk
from docopt import docopt
from scipy.stats.stats import pearsonr, spearmanr, rankdata

__prog__ = "evaluate_wmt16_task1"
__title__ = 'Evaluate WMT2016 Quality Estimation Task 1'
__summary__ = 'Evaluation script for Task 1 of the WMT Quality Estimation challenge.'
__uri__ = 'https://gist.github.com/kepler/6043a41ed8f3ed0be1e68c5942b99734'
__version__ = '0.0.1'
__author__ = 'Fabio Kepler'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Fabio Kepler'

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)


def delta_average(y_true, y_rank):
    """
    Calculate the DeltaAvg score.
    References: ?
    :param y_true: array of reference score (not rank) of each segment.
    :param y_rank: array of rank of each segment.
    :return: the absolute delta average score.
    """
    sorted_ranked_indexes = np.argsort(y_rank)
    y_length = len(sorted_ranked_indexes)

    delta_avg = 0
    max_quantiles = y_length // 2
    set_value = np.sum(y_true[sorted_ranked_indexes[np.arange(y_length)]]) / y_length
    # cache values, since there are many that are repeatedly computed between various quantiles
    quantile_values = {head: np.sum(y_true[sorted_ranked_indexes[np.arange(head)]]) / head
                       for head in range(2, y_length)}

    for quantiles in range(2, max_quantiles + 1):  # current number of quantiles
        quantile_length = y_length // quantiles
        quantile_sum = 0
        for head in np.arange(quantile_length, quantiles * quantile_length, quantile_length):
            quantile_sum += quantile_values[head]
        delta_avg += quantile_sum / (quantiles - 1) - set_value

    if max_quantiles > 1:
        delta_avg /= (max_quantiles - 1)
    else:
        delta_avg = 0
    return abs(delta_avg)


def parse_submission(file_name):
    """
    <METHOD NAME>\t<SEGMENT NUMBER>\t<SEGMENT SCORE>\t<SEGMENT RANK>
    """
    with open(file_name) as f:
        sentences = [line.strip().split('\t') for line in f]
    method = set(map(lambda x: x[0], sentences))
    if len(method) > 1:
        logger.error('There is more than one method name in file "{}": {}'.format(file_name, method))
        return None, None
    method = list(method)[0]
    segments = np.asarray(list(map(lambda x: x[1:], sentences)), dtype=float)
    if segments[:, 0].max() != segments.shape[0]:
        logger.error('Wrong number of segments in file "{}": found {}, expected {}.'.format(file_name, segments.shape[0], segments[:, 0].max()))
        return None, None
    return method, segments


def read_hter(file_name):
    with open(file_name) as f:
        scores = np.array([line.strip() for line in f], dtype='float')
    method = file_name
    segments = np.vstack((np.arange(1, scores.shape[0] + 1), scores, rankdata(scores, method='ordinal'))).T
    return method, segments


def read_file(file_name):
    with open(file_name) as f:
        if '\t' in f.readline().strip():
            return parse_submission(file_name)
        else:
            return read_hter(file_name)


def run(arguments):
    reference_file = arguments['REFERENCE_FILE']
    submission_files = arguments['SUBMISSION_FILE']

    reference_method, reference_segments = read_file(reference_file)
    if arguments['--scale']:
        reference_segments[:, 1] *= float(arguments['--scale'])

    scoring_values = []
    ranking_values = []
    for submission in submission_files:
        submission_method, submission_segments = read_file(submission)
        if arguments['--scale']:
            submission_segments[:, 1] *= float(arguments['--scale'])

        if submission_segments[:, 1].any():
            pearson = pearsonr(reference_segments[:, 1], submission_segments[:, 1])[0]  # keep only main value
            mae = sk.regression.mean_absolute_error(reference_segments[:, 1], submission_segments[:, 1])
            rmse = np.sqrt(sk.regression.mean_squared_error(reference_segments[:, 1], submission_segments[:, 1]))
            scoring_values.append((submission_method, pearson, mae, rmse))

        if submission_segments[:, 2].any():
            spearman = spearmanr(reference_segments[:, 2], submission_segments[:, 2])[0]  # keep only main value
            delta_avg = delta_average(reference_segments[:, 1], submission_segments[:, 2])  # DeltaAvg needs reference scores instead of rank
            ranking_values.append((submission_method, spearman, delta_avg))

    scoring = np.array(scoring_values, dtype=[('Method', 'object'), ('Pearson r', float), ('MAE', float), ('RMSE', float)])
    logger.info('Scoring results:')
    logger.info('{:20} {:20} {:20} {:20}'.format('Method', 'Pearson r', 'MAE', 'RMSE'))
    for submission in np.sort(scoring, order=['Pearson r', 'MAE', 'RMSE']):
        logger.info('{:20s} {:<20.10} {:<20.10} {:<20.10}'.format(*submission))

    ranking = np.array(ranking_values, dtype=[('Method', 'object'), ('Spearman rho', float), ('DeltaAvg', float)])
    logger.info('Ranking results:')
    logger.info('{:20} {:20} {:20}'.format('Method', 'Spearman rho', 'DeltaAvg'))
    for submission in np.sort(ranking, order=['Spearman rho', 'DeltaAvg']):
        logger.info('{:20} {:<20.10} {:<20.10}'.format(*submission))


if __name__ == '__main__':
    options = docopt(__doc__, argv=None, help=True, version=__version__, options_first=False)
    if options['--verbose']:
        logger.setLevel(level='DEBUG')
    elif options['--quiet']:
        logger.setLevel(level='WARNING')
    run(options)
6,708
38.233918
144
py
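delta_average above takes raw reference scores plus a rank vector for the same segments and returns the absolute DeltaAvg. A toy invocation, assuming delta_average from the file above is in scope; the HTER numbers and the noise offsets are made up for illustration:

import numpy as np
from scipy.stats import rankdata

hter = np.array([0.1, 0.4, 0.2, 0.9, 0.0, 0.5])  # reference score per segment
system_scores = hter + np.array([0.05, -0.02, 0.0, 0.1, 0.01, -0.03])  # a noisy system
ranks = rankdata(system_scores, method='ordinal')  # rank 1 = predicted best (lowest HTER)
print(delta_average(hter, ranks))  # 0 means no ranking signal; larger is better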
marmot
marmot-master/marmot/evaluation/evaluation_utils.py
from __future__ import print_function, division
from sklearn.metrics import f1_score


def write_res_to_file(test_file, test_predictions, output_file=''):
    if output_file == '':
        output_file = test_file + '.predictions'
    output = open(output_file, 'w')
    try:
        for idx, line in enumerate(open(test_file)):
            chunks = line.decode('utf-8').strip().split('\t')
            prefix = u'\t'.join(chunks[:5])  # TODO: check if number of strings and predictions match
            output.write('%s\t%s\n' % (prefix.encode('utf-8'), test_predictions[idx].encode('utf-8')))
    except IndexError:
        print('Predictions size:', len(test_predictions), ', current number: ', idx)
    finally:
        output.close()
    return output_file


def compare_vocabulary(datasets):
    '''
    :param datasets: a list of datasets, which are all lists of token sequences
    :return: a list of objects describing each dataset
    '''
    def get_vocab(dataset):
        return set([w for seq in dataset for w in seq])

    vocabs = [get_vocab(dataset) for dataset in datasets]
    common_vocab = set.intersection(*vocabs)
    out = []
    for i, vocab in enumerate(vocabs):
        out.append({'coverage': len(common_vocab) / len(vocab)})
    return out


# evaluation without checking the sentence numbers
# odd_col -- number of columns that should be ignored (e.g. system ID)
def evaluate_simple(ref_file, hyp_file, odd_col=0, check_words=True, average='weighted'):
    tags_ref, tags_hyp = [], []
    tags_dict = {u'BAD': 0, u'OK': 1}
    for idx, (ref, hyp) in enumerate(zip(open(ref_file), open(hyp_file))):
        chunks_ref = ref.decode('utf-8').strip().split('\t')
        chunks_hyp = hyp.decode('utf-8').strip().split('\t')
        if chunks_ref[2] != chunks_hyp[2 + odd_col] and check_words:
            print("Words don't match at string", idx)
            return -1
        tags_ref.append(chunks_ref[-1])
        tags_hyp.append(chunks_hyp[-1])
        # all_tags.append(chunks_ref[-1])
        # all_tags.append(chunks_hyp[-1])
    # return f1_score([tags_dict[i] for i in tags_ref], [tags_dict[i] for i in tags_hyp])
    return f1_score([tags_dict[i] for i in tags_ref], [tags_dict[i] for i in tags_hyp], average=average)
2,264
36.131148
104
py
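The core of evaluate_simple above, shown on in-memory tag lists rather than files (a sketch; the real function also checks that the tokens in the two files match line by line):

from sklearn.metrics import f1_score

tags_dict = {u'BAD': 0, u'OK': 1}
tags_ref = ['OK', 'OK', 'BAD', 'OK', 'BAD']
tags_hyp = ['OK', 'BAD', 'BAD', 'OK', 'OK']
score = f1_score([tags_dict[t] for t in tags_ref],
                 [tags_dict[t] for t in tags_hyp],
                 average='weighted')
print(score)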
marmot
marmot-master/marmot/evaluation/__init__.py
0
0
0
py
marmot
marmot-master/marmot/evaluation/check_error_types.py
from __future__ import division, print_function
import codecs
import argparse
from itertools import groupby


def get_error_distribution(error_file):
    with codecs.open(error_file, encoding='utf8') as tsv:
        # remove newlines
        row_data = [l.rstrip().split('\t') for l in tsv]
        class_data = [{'predicted': l[0], 'actual': l[1], 'word': l[2], 'class': l[3]} for l in row_data]
        class_data = sorted(class_data, key=lambda x: x['class'])
        for key, group in groupby(class_data, lambda x: x['class']):
            group_instances = list(group)
            print('ERROR TYPE: {}\tTOTAL INSTANCES: {}'.format(key, str(len(group_instances))))
            accuracy = sum([1 for i in group_instances if i['predicted'] == i['actual']]) / len(group_instances)
            print("group accuracy: {}".format(str(accuracy)))
            # for i in group_instances:
            #     print(i)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, help='input file -- sentences tagged with errors')
    args = parser.parse_args()
    get_error_distribution(args.input)
1,124
42.269231
108
py
marmot
marmot-master/marmot/evaluation/tests/test_evaluate.py
# test the evaluation functions
import unittest
import os
from marmot.evaluation.evaluate import significance_test, get_scores, read_wmt_annotation, evaluate_wmt, evaluate_hashed_predictions


class TestEvaluate(unittest.TestCase):

    def setUp(self):
        module_path = os.path.dirname(__file__)
        gold_standard_path = 'test_data/gold_standard.de-en'
        test_annotations_path = 'test_data/test_annotations.de-en'
        # get sample test data and gold standard
        self.gold_standard_path = os.path.join(module_path, gold_standard_path)
        self.test_annotations_path = os.path.join(module_path, test_annotations_path)
        self.ref_annotations = read_wmt_annotation(open(self.gold_standard_path))
        self.test_annotations = read_wmt_annotation(open(self.test_annotations_path))
        self.interesting_tokens = []

    def test_significance_test(self):
        test_options = ['ok', 'bad']
        actual = ['ok', 'bad', 'ok', 'bad', 'ok', 'ok', 'ok', 'ok', 'ok', 'ok']
        hyp = ['ok', 'bad', 'ok', 'bad', 'ok', 'ok', 'ok', 'ok', 'ok', 'ok']
        hyp_res = get_scores(actual, hyp, test_options)
        p_value = significance_test(actual, hyp_res, test_options)
        self.assertTrue(p_value == 0.05)
        p_value = significance_test(actual, hyp_res, test_options, granularity=100)
        self.assertTrue(p_value <= 0.05)

    def test_evaluate_wmt(self):
        evaluation_results = evaluate_wmt(self.ref_annotations, self.test_annotations)
        self.assertTrue(evaluation_results['all_data']['p_value'] == 0.05)
        print('WMT evaluation_results')
        print(evaluation_results)

    def test_evaluate_hashed_predictions(self):
        # get sample hashed predictions
        test_ref = {'apple': ['good', 'bad', 'bad'], 'fish': ['good']}
        test_hyp = {'apple': ['good', 'bad', 'bad'], 'fish': ['good']}
        test_labels = set(['good', 'bad'])
        evaluation_results = evaluate_hashed_predictions(test_ref, test_hyp, test_labels)
        print('Token hash evaluation_results')
        print(evaluation_results)
        self.assertTrue(evaluation_results['apple']['weighted_f1'] == 1.0)


if __name__ == '__main__':
    unittest.main()
2,221
39.4
132
py
marmot
marmot-master/marmot/evaluation/tests/test_evaluation_utils.py
# test the evaluation functions
import unittest
import os
from marmot.evaluation.evaluation_utils import compare_vocabulary


class TestEvaluate(unittest.TestCase):

    def test_compare_vocabulary(self):
        dataset1 = [['this', 'is', 'sentence', 'number', 'one'], ['another', 'list', 'comes', 'next', '.']]
        dataset2 = [['this', 'is', 'sentence', 'number', 'two'], ['this', 'is', 'sentence', 'number', 'two']]
        comparisons = compare_vocabulary([dataset1, dataset2])
        # 0.4 = fraction of dataset 1's vocabulary that is shared with dataset 2
        self.assertEqual(comparisons[0]['coverage'], 0.4)
        # 0.8 = fraction of dataset 2's vocabulary that is shared with dataset 1
        self.assertEqual(comparisons[1]['coverage'], 0.8)


if __name__ == '__main__':
    unittest.main()
794
32.125
109
py
marmot
marmot-master/marmot/evaluation/tests/test_evaluation_metrics.py
import unittest
from marmot.evaluation.evaluation_metrics import get_spans, intersect_spans, sequence_correlation


class TestEvaluationUtils(unittest.TestCase):

    def setUp(self):
        self.predictions = []
        cur_pred = []
        for line in open('test_data/hyp'):
            if line.strip() == '':
                self.predictions.append(cur_pred)
                cur_pred = []
            else:
                cur_pred.append(line.strip())
        self.predictions.append(cur_pred)
        self.references = []
        cur_ref = []
        for line in open('test_data/ref'):
            if line.strip() == '':
                self.references.append(cur_ref)
                cur_ref = []
            else:
                cur_ref.append(line.strip())
        self.references.append(cur_ref)

    def test_get_spans(self):
        sentence = [1, 1, 0, 1, 0, 1, 1, 1, 0]
        good_s, bad_s = get_spans(sentence)
        # test that right spans are extracted
        self.assertItemsEqual(good_s, [(0, 2), (3, 4), (5, 8)])
        self.assertItemsEqual(bad_s, [(2, 3), (4, 5), (8, 9)])
        all_spans = sorted(good_s + bad_s)
        all_items = [t for a_list in [sentence[b:e] for (b, e) in all_spans] for t in a_list]
        # test that the extracted spans cover the whole sequence
        self.assertItemsEqual(sentence, all_items)

    def test_intersect_spans(self):
        true_sentence = [1, 1, 0, 1, 0, 1, 1, 1, 0, 0]
        sentence = [0, 1, 1, 1, 0, 1, 1, 1, 1, 1]
        good_s, bad_s = get_spans(sentence)
        good_t, bad_t = get_spans(true_sentence)
        res_1 = intersect_spans(good_t, good_s)
        res_0 = intersect_spans(bad_t, bad_s)
        self.assertEqual(res_1, 4)
        self.assertEqual(res_0, 1)

    def test_sequence_correlation(self):
        sent_scores, total = sequence_correlation(self.references, self.predictions, good_label='OK', bad_label='BAD')
        self.assertAlmostEqual(sent_scores[0], 0.31578947)
        self.assertAlmostEqual(sent_scores[1], 0.8)
        self.assertAlmostEqual(total, 0.55789473)

    # def test_alternative_label(self):
    #     sequence_correlation(y_true, y_pred, good_label='OK', bad_label='BAD')


if __name__ == '__main__':
    unittest.main()
2,260
35.467742
118
py
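get_spans itself is not included in this dump, but the test above pins down its contract: consecutive runs of equal labels become half-open (start, end) spans, split into good and bad. A standalone sketch consistent with those expectations (the function name is hypothetical):

from itertools import groupby

def get_spans_sketch(labels, good_label=1):
    # group consecutive identical labels into half-open (start, end) spans
    good, bad, pos = [], [], 0
    for label, run in groupby(labels):
        length = len(list(run))
        span = (pos, pos + length)
        (good if label == good_label else bad).append(span)
        pos += length
    return good, bad

print(get_spans_sketch([1, 1, 0, 1, 0, 1, 1, 1, 0]))
# ([(0, 2), (3, 4), (5, 8)], [(2, 3), (4, 5), (8, 9)])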
marmot
marmot-master/marmot/features/target_token_left_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context


class TargetTokenLeftFeatureExtractor(FeatureExtractor):
    '''
    Target features:
    - target token
    - left window of the target token
    '''

    def __init__(self, context_size=1):
        self.context_size = context_size

    def get_features(self, context_obj):
        token = context_obj['token']
        left = ' '.join(left_context(context_obj['target'], token, context_size=self.context_size, idx=context_obj['index']))
        return [token, left]

    def get_feature_names(self):
        return ['token', 'left_context']
697
30.727273
125
py
marmot
marmot-master/marmot/features/gaussian_feature_extractor.py
import numpy as np
import sys

from marmot.features.feature_extractor import FeatureExtractor
from scipy.stats import norm


# generate features from distributions of some existing set of examples
class GaussianFeatureExtractor(FeatureExtractor):

    def __init__(self, features):
        # the distributions for each token - one (mean, std. deviation) pair per feature column
        self.distributions = {tok: [(np.average(vec), np.std(vec)) for vec in feature_vectors.T]
                              for tok, feature_vectors in features.items()}

    # context_obj may contain only token
    def get_features(self, context_obj):
        token = context_obj['token']
        if token not in self.distributions:
            sys.stderr.write('No distribution for token %s\n' % token.encode('utf-8'))
            return []
        return np.array([norm.rvs(loc=avg, scale=std) for avg, std in self.distributions[token]])
981
41.695652
140
py
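Hypothetical usage of the Gaussian extractor above, assuming the class is importable and that `features` maps each token to a matrix of observed feature vectors (one row per example, one column per feature); get_features then samples a fresh vector from the per-feature Gaussians:

import numpy as np

features = {u'dog': np.array([[1.0, 10.0],
                              [1.2, 9.0],
                              [0.8, 11.0]])}  # 3 examples x 2 features
extractor = GaussianFeatureExtractor(features)
print(extractor.get_features({'token': u'dog'}))  # one sample per feature column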
marmot
marmot-master/marmot/features/google_translate_feature_extractor.py
from goslate import Goslate
from nltk import word_tokenize

from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError


class GoogleTranslateFeatureExtractor(FeatureExtractor):

    def __init__(self, lang='en'):
        self.lang = lang

    def get_features(self, context_obj):
        if 'source' not in context_obj:
            raise NoDataError('source', context_obj, 'GoogleTranslateFeatureExtractor')

        if 'pseudo-reference' in context_obj:
            translation = context_obj['pseudo-reference']
        else:
            gs = Goslate()
            translation = word_tokenize(gs.translate(' '.join(context_obj['source']), self.lang))
        if context_obj['token'] in translation:
            return [1]
        return [0]

    def get_feature_names(self):
        return ["pseudo-reference"]
893
29.827586
97
py
marmot
marmot-master/marmot/features/pos_feature_extractor.py
import sys
from subprocess import Popen, PIPE

from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
from marmot.exceptions.no_resource_error import NoResourceError


class POSFeatureExtractor(FeatureExtractor):
    """
    POS for source and target words, tagged with TreeTagger
    """

    def __init__(self, tagger=None, par_file_src=None, par_file_tg=None):
        self.tagger = tagger
        self.par_src = par_file_src
        self.par_tg = par_file_tg

    # tag words if context_obj has no tagging
    # returns tags for all words in sentence
    def _call_tagger(self, tok_list, lang='tg'):
        par_file = self.par_tg if lang == 'tg' else self.par_src
        out = []

        if self.tagger is None:
            raise NoResourceError('tagger', 'POSFeatureExtractor')
        if par_file is None:
            raise NoResourceError('tagging parameters', 'POSFeatureExtractor')

        p = Popen([self.tagger, '-quiet', par_file], stdin=PIPE, stdout=PIPE)
        out = p.communicate(input='\n'.join([tok.encode('utf-8') for tok in tok_list]))[0].decode('utf-8').split('\n')
        return out

    def get_features(self, context_obj):
        if 'target_pos' not in context_obj:
            if 'target' in context_obj and context_obj['target'] is not None:
                context_obj['target_pos'] = self._call_tagger(context_obj['target'])
            else:
                raise NoDataError('target_pos', context_obj, 'POSFeatureExtractor')
        if 'source_pos' not in context_obj:
            if 'source' in context_obj and context_obj['source'] is not None:
                context_obj['source_pos'] = self._call_tagger(context_obj['source'], lang='src')
            else:
                raise NoDataError('source_pos', context_obj, 'POSFeatureExtractor')

        # extract POS features:
        #  - target POS
        #  - source POS (may be more than 1)
        #  - something else?
        tg_pos = context_obj['target_pos'][context_obj['index']] if context_obj['target_pos'] != [] else ''
        src_pos = []
        if 'source_pos' in context_obj and context_obj['source_pos'] != [] and 'alignments' in context_obj:
            align_idx = context_obj['alignments'][context_obj['index']]
            if align_idx is not None:
                src_pos = context_obj['source_pos'][align_idx]
            else:
                src_pos = '__unaligned__'
        return [tg_pos, src_pos]

    def get_feature_names(self):
        return ['target_pos', 'aligned_source_pos']
2,564
40.370968
118
py
marmot
marmot-master/marmot/features/feature_extractor.py
# an abstract class representing a feature extractor
# a feature extractor takes an object like
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
# and extracts features for that instance
# IMPORTANT - different feature extractors require different fields in the context object
# - it's up to the feature extractor implementation to determine which fields it actually needs,
#   and to ensure that the object contains them

from abc import ABCMeta, abstractmethod


class FeatureExtractor(object):

    __metaclass__ = ABCMeta

    # subclasses must provide the implementation
    @abstractmethod
    def get_features(self, context_obj):
        """
        returns a list of features (one or more)
        :param context_obj: { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>, ...}
        :return: [<feature1>, <feature2>, ...] - some fields MAY BE MISSING from a given context object,
                 the implementation needs to check for its fields
        """
        pass

    @abstractmethod
    def get_feature_names(self):
        """
        :return: a list of strings representing names of the features returned by get_features
        """
        pass
1,259
38.375
140
py
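A minimal concrete extractor, to illustrate the contract above: the names returned by get_feature_names line up positionally with the values returned by get_features. The class below is a made-up example, not part of marmot, and assumes FeatureExtractor is in scope:

class TokenLengthFeatureExtractor(FeatureExtractor):

    def get_features(self, context_obj):
        token = context_obj['token']
        return [len(token), int(token.isupper())]

    def get_feature_names(self):
        return ['token_length', 'is_all_caps']

extractor = TokenLengthFeatureExtractor()
print(extractor.get_features({'token': 'NASA', 'index': 0}))  # [4, 1]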
marmot
marmot-master/marmot/features/word2vec_feature_extractor.py
import numpy as np
import sys
import logging

from gensim.models import Word2Vec
from marmot.features.feature_extractor import FeatureExtractor

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')


def left_context(token_list, token, context_size, idx):
    left_window = []
    if idx <= 0:
        # return ['_START_' for i in range(context_size)]
        return ['<s>' for i in range(context_size)]
    assert(token_list[idx] == token)
    for i in range(idx - context_size, idx):
        if i < 0:
            # left_window.append('_START_')
            left_window.append('<s>')
        else:
            left_window.append(token_list[i])
    return left_window


def right_context(token_list, token, context_size, idx):
    right_window = []
    if idx >= len(token_list):
        # return ['_END_' for i in range(context_size)]
        return ['</s>' for i in range(context_size)]
    assert(token_list[idx] == token), "Token in token list: {}, index: {}, token provided in parameters: {}".format(token_list[idx], idx, token)
    for i in range(idx + 1, idx + context_size + 1):
        if i > len(token_list) - 1:
            # right_window.append('_END_')
            right_window.append('</s>')
        else:
            right_window.append(token_list[i])
    return right_window


class Word2VecFeatureExtractor(FeatureExtractor):
    '''
    Combine a feature vector for an ngram of arbitrary length
    from w2v vectors of all words of the ngram.
    <combination> --- method of combination of word vectors:
        - 'sum' (default)
        - 'avg'
    '''

    def __init__(self, w2v_file, combination='sum', context_size=2):
        self.model = Word2Vec.load(w2v_file)
        self.default_vector = np.average(np.array([self.model[x] for x in self.model.vocab]), axis=0).reshape((-1,))
        self.zero_vector = np.zeros(self.default_vector.shape[0])
        self.context_size = context_size
        if combination == 'sum':
            self.combine = np.sum
        elif combination == 'avg':
            self.combine = np.average
        else:
            print("Unknown combination type provided: '{}'".format(combination))

    def extract_word2vec_vector(self, token):
        if token in self.model.vocab:
            return self.model[token]
        # elif token == '_START_' or token == '_END_':
        elif token == '<s>' or token == '</s>':
            return self.zero_vector
        else:
            return self.default_vector

    # extract the word2vec features for a window of tokens around the target token
    def get_features(self, context_obj):
        if 'token' not in context_obj or len(context_obj['token']) == 0:
            print("No token in context object ", context_obj)
            sys.exit()
        if 'index' not in context_obj or len(context_obj['index']) == 0:
            print("No index in context object ", context_obj)
            sys.exit()
        if context_obj['index'][0] == context_obj['index'][1]:
            print("Invalid token indices in sentence: ", context_obj['target'])
            print("Indices: {}, {}".format(context_obj['index'][0], context_obj['index'][1]))

        phrase_vector = []
        # if 'token' contains more than 1 string, 'index' should be an interval
        if type(context_obj['token']) is list or type(context_obj['token']) is np.ndarray:
            left_window = left_context(context_obj['target'], context_obj['token'][0], self.context_size, context_obj['index'][0])
            right_window = right_context(context_obj['target'], context_obj['token'][-1], self.context_size, context_obj['index'][-1] - 1)
            phrase_vector = [self.extract_word2vec_vector(tok) for tok in context_obj['token']]
            phrase_vector = self.combine(phrase_vector, axis=0)
        else:
            left_window = left_context(context_obj['target'], context_obj['token'], self.context_size, context_obj['index'])
            right_window = right_context(context_obj['target'], context_obj['token'], self.context_size, context_obj['index'])
            phrase_vector = self.extract_word2vec_vector(context_obj['token'])

        vector = []
        for tok in left_window:
            vector.extend(self.extract_word2vec_vector(tok))
        vector.extend(phrase_vector)
        for tok in right_window:
            vector.extend(self.extract_word2vec_vector(tok))
        return np.hstack(vector)

    # TODO: there should be a name for every feature
    def get_feature_names(self):
        return ['w2v' + str(i) for i in range(len(self.default_vector) * (self.context_size * 2 + 1))]
4,647
41.642202
144
py
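The padding behavior of the context helpers above, shown standalone (assuming left_context and right_context from the file above are in scope): windows that run off the sentence are filled with <s> / </s> markers, which the extractor then maps to the zero vector.

sentence = ['the', 'cat', 'sat']
print(left_context(sentence, 'the', context_size=2, idx=0))   # ['<s>', '<s>']
print(right_context(sentence, 'cat', context_size=2, idx=1))  # ['sat', '</s>']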
marmot
marmot-master/marmot/features/lm_left_feature_extractor.py
from __future__ import print_function

import codecs
from subprocess import call
import os
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import extract_window, left_context, right_context
from marmot.experiment.import_utils import mk_tmp_dir


# Class that extracts various LM features
# Calling an external LM is very slow, so a new lm is constructed with nltk
class LMLeftFeatureExtractor(FeatureExtractor):

    def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
        # generate ngram counts
        if ngram_file is None:
            if srilm is None:
                if 'SRILM' in os.environ:
                    srilm = os.environ['SRILM']
                else:
                    print("No SRILM found")
                    return
            if corpus_file is None:
                print("No corpus for LM generation")
                return

            srilm_ngram_count = os.path.join(srilm, 'ngram-count')

            tmp_dir = mk_tmp_dir(tmp_dir)
            lm_file = os.path.join(tmp_dir, 'lm_file')
            ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
            call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])

        self.lm = defaultdict(int)
        for line in codecs.open(ngram_file, encoding='utf-8'):
            chunks = line[:-1].split('\t')
            if len(chunks) == 2:
                new_tuple = tuple(chunks[0].split())
                new_number = int(chunks[1])
                self.lm[new_tuple] = new_number
            else:
                print("Wrong ngram-counts file format at line '", line[:-1], "'")
        self.order = order

    def check_lm(self, ngram, side='left'):
        for i in range(self.order, 0, -1):
            if side == 'left':
                cur_ngram = ngram[len(ngram) - i:]
            elif side == 'right':
                cur_ngram = ngram[:i]
            else:
                print("Unknown parameter 'side'", side)
                return 0
            if tuple(cur_ngram) in self.lm:
                return i
        return 0

    def get_backoff(self, ngram):
        assert(len(ngram) == 3)
        ngram = tuple(ngram)
        # trigram (1, 2, 3)
        if ngram in self.lm:
            return 1.0
        # two bigrams (1, 2) and (2, 3)
        elif ngram[:2] in self.lm and ngram[1:] in self.lm:
            return 0.8
        # bigram (2, 3)
        elif ngram[1:] in self.lm:
            return 0.6
        # bigram (1, 2) and unigram (3)
        elif ngram[:2] in self.lm and ngram[2:] in self.lm:
            return 0.4
        # unigrams (2) and (3)
        elif ngram[1:2] in self.lm and ngram[2:] in self.lm:
            return 0.3
        # unigram (3)
        elif ngram[2:] in self.lm:
            return 0.2
        # all words unknown
        else:
            return 0.1

    # returns a set of features related to LM
    # currently extracting: highest order ngram including the word and its LEFT context
    def get_features(self, context_obj):
        idx = context_obj['index']
        left_ngram = left_context(context_obj['target'], context_obj['token'], context_size=self.order - 1, idx=idx) + [context_obj['token']]
        left_ngram_order = self.check_lm(left_ngram, side='left')

        left_trigram = left_context(context_obj['target'], context_obj['token'], context_size=2, idx=idx) + [context_obj['token']]
        backoff_left = self.get_backoff(left_trigram)

        return [left_ngram_order, backoff_left]

    def get_feature_names(self):
        return ['highest_order_ngram_left', 'backoff_behavior_left']
3,824
36.135922
139
py
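The backoff scoring above on a hand-built count table (a sketch: the real table comes from SRILM ngram counts, and __new__ is used here only to skip the SRILM-dependent __init__). A seen trigram scores 1.0, a seen final bigram 0.6, and a completely unseen trigram falls through to 0.1:

from collections import defaultdict

lm = defaultdict(int)
lm[('the', 'black', 'cat')] = 3
lm[('black', 'cat')] = 5

extractor = LMLeftFeatureExtractor.__new__(LMLeftFeatureExtractor)  # bypass SRILM setup
extractor.lm = lm
extractor.order = 3
print(extractor.get_backoff(['the', 'black', 'cat']))  # 1.0
print(extractor.get_backoff(['a', 'black', 'cat']))    # 0.6
print(extractor.get_backoff(['a', 'red', 'dog']))      # 0.1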
marmot
marmot-master/marmot/features/tfidf_feature_extractor.py
# TODO: build a tf-idf index in __init__(corpus_file_name)
55
55
55
py
marmot
marmot-master/marmot/features/token_count_feature_extractor.py
from __future__ import print_function, division

import numpy as np
from marmot.features.feature_extractor import FeatureExtractor

import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')


class TokenCountFeatureExtractor(FeatureExtractor):

    # extract token-count features for the source and target sides of the context object
    def get_features(self, context_obj):
        feature_funcs = [self.source_count, self.target_count, self.source_target_ratio]
        return np.array([f(context_obj) for f in feature_funcs])

    def source_count(self, context_obj):
        if 'source' in context_obj and type(context_obj['source']) == list:
            return float(len(context_obj['source']))
        logger.warn('you are trying to extract the source token count from a context object without a "source" field')
        return 0.0

    def target_count(self, context_obj):
        if 'target' in context_obj and type(context_obj['target']) == list:
            return float(len(context_obj['target']))
        logger.warn('you are trying to extract the target token count from a context object without a "target" field')
        return 0.0

    def source_target_ratio(self, context_obj):
        s_count = self.source_count(context_obj)
        t_count = self.target_count(context_obj)
        return s_count / t_count

    def get_feature_names(self):
        return ['source_token_count', 'target_token_count', 'source_target_token_count_ratio']
1,542
38.564103
118
py
marmot
marmot-master/marmot/features/previous_tag_feature_extractor.py
from __future__ import print_function

from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError


class PreviousTagFeatureExtractor(FeatureExtractor):
    '''
    Extracts the tag of the previous token
    '''

    def __init__(self):
        pass

    def get_features(self, context_obj):
        if 'sequence_tags' not in context_obj:
            raise NoDataError('sequence_tags', context_obj, 'PreviousTagFeatureExtractor')
        idx = context_obj['index']
        if idx == 0:
            return ['_START_']
        else:
            return [context_obj['sequence_tags'][idx - 1]]

    def get_feature_names(self):
        return ['previous_token_tag']
721
25.740741
90
py
marmot
marmot-master/marmot/features/pseudo_reference_feature_extractor.py
from nltk import word_tokenize

from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError


class PseudoReferenceFeatureExtractor(FeatureExtractor):
    '''
    Extracts the pseudo-reference feature from pseudo-references provided in a file
    (as an alternative to GoogleTranslateFeatureExtractor)
    '''

    def __init__(self, ref_file):
        self.pseudo_references = []
        for line in open(ref_file):
            self.pseudo_references.append(word_tokenize(line[:-1].decode('utf-8')))

    def get_features(self, context_obj):
        if 'sentence_id' not in context_obj:
            raise NoDataError('sentence_id', context_obj, 'PseudoReferenceFeatureExtractor')
        out = 1 if context_obj['token'] in self.pseudo_references[context_obj['sentence_id']] else 0
        return [out]

    def get_feature_names(self):
        return ["pseudo-reference"]
953
33.071429
100
py
marmot
marmot-master/marmot/features/lm_feature_extractor.py
from __future__ import print_function

import codecs
from subprocess import call
import os
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import extract_window, left_context, right_context
from marmot.experiment.import_utils import mk_tmp_dir


# Class that extracts various LM features
# Calling an external LM is very slow, so a new lm is constructed with nltk
class LMFeatureExtractor(FeatureExtractor):

    def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
        # generate ngram counts
        if ngram_file is None:
            if srilm is None:
                if 'SRILM' in os.environ:
                    srilm = os.environ['SRILM']
                else:
                    print("No SRILM found")
                    return
            if corpus_file is None:
                print("No corpus for LM generation")
                return

            srilm_ngram_count = os.path.join(srilm, 'ngram-count')

            tmp_dir = mk_tmp_dir(tmp_dir)
            lm_file = os.path.join(tmp_dir, 'lm_file')
            ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
            call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])

        self.lm = defaultdict(int)
        for line in codecs.open(ngram_file, encoding='utf-8'):
            chunks = line[:-1].split('\t')
            if len(chunks) == 2:
                new_tuple = tuple(chunks[0].split())
                new_number = int(chunks[1])
                self.lm[new_tuple] = new_number
            else:
                print("Wrong ngram-counts file format at line '", line[:-1], "'")
        self.order = order

    def check_lm(self, ngram, side='left'):
        for i in range(self.order, 0, -1):
            if side == 'left':
                cur_ngram = ngram[len(ngram) - i:]
            elif side == 'right':
                cur_ngram = ngram[:i]
            else:
                print("Unknown parameter 'side'", side)
                return 0
            if tuple(cur_ngram) in self.lm:
                return i
        return 0

    def get_backoff(self, ngram):
        assert(len(ngram) == 3)
        ngram = tuple(ngram)
        # trigram (1, 2, 3)
        if ngram in self.lm:
            return 1.0
        # two bigrams (1, 2) and (2, 3)
        elif ngram[:2] in self.lm and ngram[1:] in self.lm:
            return 0.8
        # bigram (2, 3)
        elif ngram[1:] in self.lm:
            return 0.6
        # bigram (1, 2) and unigram (3)
        elif ngram[:2] in self.lm and ngram[2:] in self.lm:
            return 0.4
        # unigrams (2) and (3)
        elif ngram[1:2] in self.lm and ngram[2:] in self.lm:
            return 0.3
        # unigram (3)
        elif ngram[2:] in self.lm:
            return 0.2
        # all words unknown
        else:
            return 0.1

    # returns a set of features related to LM
    # currently extracting: highest order ngram including the word and its LEFT context,
    # highest order ngram including the word and its RIGHT context
    def get_features(self, context_obj):
        idx = context_obj['index']
        left_ngram = left_context(context_obj['target'], context_obj['token'], context_size=self.order - 1, idx=idx) + [context_obj['token']]
        right_ngram = [context_obj['token']] + right_context(context_obj['target'], context_obj['token'], context_size=self.order - 1, idx=idx)
        left_ngram_order = self.check_lm(left_ngram, side='left')
        right_ngram_order = self.check_lm(right_ngram, side='right')

        left_trigram = left_context(context_obj['target'], context_obj['token'], context_size=2, idx=idx) + [context_obj['token']]
        middle_trigram = extract_window(context_obj['target'], context_obj['token'], idx=idx)
        right_trigram = [context_obj['token']] + right_context(context_obj['target'], context_obj['token'], context_size=2, idx=idx)

        # TODO: instead of _START_ there should be <s>
        backoff_left = self.get_backoff(left_trigram)
        backoff_middle = self.get_backoff(middle_trigram)
        backoff_right = self.get_backoff(right_trigram)

        return [left_ngram_order, right_ngram_order, backoff_left, backoff_middle, backoff_right]

    def get_feature_names(self):
        return ['highest_order_ngram_left',
                'highest_order_ngram_right',
                'backoff_behavior_left',
                'backoff_behavior_middle',
                'backoff_behavior_right']
4,560
40.09009
150
py
marmot
marmot-master/marmot/features/dictionary_feature_extractor.py
import string
import nltk

from marmot.features.feature_extractor import FeatureExtractor


class DictionaryFeatureExtractor(FeatureExtractor):
    '''
    Extract binary features indicating that the word belongs to a list of special tokens:
    - stopwords
    - punctuation symbols
    - proper names
    - numbers
    '''

    def __init__(self, language='', stopwords=[], punctuation=[], proper=[]):
        # all lists can be defined by user, otherwise are taken from python and nltk
        self.punctuation = punctuation if punctuation else string.punctuation
        self.stopwords = stopwords if stopwords else nltk.corpus.stopwords.words(language) if language else nltk.corpus.stopwords.words()
        self.proper = proper

    # returns:
    # (is stopword, is punctuation, is proper name, is digit)
    def get_features(self, context_obj):
        tok = context_obj['token']
        return [int(tok in self.stopwords),
                int(tok in self.punctuation),
                int(tok in self.proper if self.proper else tok.istitle()),
                int(tok.isdigit())]

    def get_feature_names(self):
        return ['is_stopword', 'is_punctuation', 'is_proper_noun', 'is_digit']
1,159
37.666667
152
py
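Hypothetical usage of the dictionary extractor above with user-supplied lists, which avoids the nltk stopword download that the defaults rely on (assumes the class is importable; the word lists are illustrative):

extractor = DictionaryFeatureExtractor(stopwords=['the', 'a'],
                                       punctuation=['.', ','],
                                       proper=['Paris'])
print(extractor.get_features({'token': 'Paris'}))  # [0, 0, 1, 0]
print(extractor.get_features({'token': '42'}))     # [0, 0, 0, 1]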
marmot
marmot-master/marmot/features/__init__.py
0
0
0
py
marmot
marmot-master/marmot/features/target_token_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context


class TargetTokenFeatureExtractor(FeatureExtractor):
    '''
    Target features:
    - target token
    - left and right windows of the target token
    '''

    def __init__(self, context_size=1):
        self.context_size = context_size

    def get_features(self, context_obj):
        token = context_obj['token']
        left = ' '.join(left_context(context_obj['target'], token, context_size=self.context_size, idx=context_obj['index']))
        right = ' '.join(right_context(context_obj['target'], token, context_size=self.context_size, idx=context_obj['index']))
        return [token, left, right]

    def get_feature_names(self):
        return ['token', 'left_context', 'right_context']
845
35.782609
127
py
marmot
marmot-master/marmot/features/syntactic_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
import sys


# get word's parent
def get_parent(dependencies, word_id, sentence_length, context_obj):
    for head in dependencies:
        if len(dependencies[head]) == 0:
            continue
        for dep in dependencies[head]:
            if dep['id'] == word_id:
                return head, dep['type']
                # assert(head < sentence_length), "Length: {}, head: {} for word {}, context object: {}".format(sentence_length, head, word_id, context_obj)
    return None, None


def get_siblings(dependencies, word_id, head_id, sentence_length):
    left_sib = 0
    right_sib = sentence_length
    if head_id is None:
        return None, None
    if head_id not in dependencies:
        return None, None
    for dep in dependencies[head_id]:
        # closest sibling to the left of word_id
        if dep['id'] < word_id and dep['id'] > left_sib:
            left_sib = dep['id']
        # closest sibling to the right of word_id
        if dep['id'] > word_id and dep['id'] < right_sib:
            right_sib = dep['id']
    if right_sib == sentence_length:
        right_sib = None
    if left_sib == 0:
        left_sib = None
    return left_sib, right_sib


class SyntacticFeatureExtractor(FeatureExtractor):

    def get_features_one_lang(self, dependencies, sentence, sentence_pos, word_idx, context_obj):
        sent_len = len(sentence)
        head, dep_type = get_parent(dependencies, word_idx, sent_len, context_obj)
        grandhead, grand_dep_type = get_parent(dependencies, head, sent_len, context_obj)
        left_sib, right_sib = get_siblings(dependencies, word_idx, head, sent_len)

        if head == -1 and dep_type == 'root':
            head_token = 'None'
            head_pos = 'None'
        else:
            head_token = sentence[head] if head is not None else 'None'
            head_pos = sentence_pos[head] if head is not None else 'None'
        if grandhead == -1 and grand_dep_type == 'root':
            grand_head_token = 'None'
            grand_head_pos = 'None'
        else:
            grand_head_token = sentence[grandhead] if grandhead is not None else 'None'
            grand_head_pos = sentence_pos[grandhead] if grandhead is not None else 'None'
        left_sib_token = sentence[left_sib] if left_sib is not None else 'None'
        left_sib_pos = sentence_pos[left_sib] if left_sib is not None else 'None'
        right_sib_token = sentence[right_sib] if right_sib is not None else 'None'
        right_sib_pos = sentence_pos[right_sib] if right_sib is not None else 'None'
        token = sentence[word_idx]
        token_pos = sentence_pos[word_idx]

        synt_features = [str(dep_type), token + '|' + str(dep_type)]
        synt_features.extend([head_token + '|' + token, head_pos + '|' + token_pos])
        synt_features.extend([left_sib_token + '|' + token, right_sib_token + '|' + token, left_sib_pos + '|' + token_pos, right_sib_pos + '|' + token_pos])
        synt_features.extend([grand_head_token + '|' + head_token + '|' + token, grand_head_pos + '|' + head_pos + '|' + token_pos])
        return synt_features

    def get_features(self, context_obj):
        index = context_obj['index']
        src_index = context_obj['alignments'][index]
        if len(context_obj['source']) != len(context_obj['source_synt_pos']) or src_index is None:
            synt_features_src = ['None' for i in range(10)]
        else:
            synt_features_src = self.get_features_one_lang(context_obj['source_dependencies'], context_obj['source'], context_obj['source_pos'], src_index, context_obj)
        if len(context_obj['target']) != len(context_obj['target_synt_pos']):
            synt_features_tg = ['None' for i in range(10)]
        else:
            synt_features_tg = self.get_features_one_lang(context_obj['target_dependencies'], context_obj['target'], context_obj['target_pos'], index, context_obj)
        return synt_features_tg + synt_features_src

    def get_feature_names(self):
        return ['dep_type',
                'token+dep_type',
                'head+token',
                'head_pos+token_pos',
                'left_sib+token',
                'right_sib+token',
                'left_sib_pos+token_pos',
                'right_sib_pos+token_pos',
                'grandhead+head+token',
                'grandhead_pos+head_pos+token_pos',
                'src_dep_type',
                'src_token+dep_type',
                'src_head+token',
                'src_head_pos+token_pos',
                'src_left_sib+token',
                'src_right_sib+token',
                'src_left_sib_pos+token_pos',
                'src_right_sib_pos+token_pos',
                'src_grandhead+head+token',
                'src_grandhead_pos+head_pos+token_pos']
4,907
44.444444
168
py
marmot
marmot-master/marmot/features/source_word2vec_feature_extractor.py
import numpy as np
import sys
import logging

from gensim.models import Word2Vec
from marmot.features.feature_extractor import FeatureExtractor

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')


def left_context(token_list, token, context_size, idx):
    left_window = []
    if idx <= 0:
        # return ['_START_' for i in range(context_size)]
        return ['<s>' for i in range(context_size)]
    assert(token_list[idx] == token)
    for i in range(idx - context_size, idx):
        if i < 0:
            # left_window.append('_START_')
            left_window.append('<s>')
        else:
            left_window.append(token_list[i])
    return left_window


def right_context(token_list, token, context_size, idx):
    right_window = []
    if idx >= len(token_list):
        # return ['_END_' for i in range(context_size)]
        return ['</s>' for i in range(context_size)]
    assert(token_list[idx] == token), "Token in token list: {}, index: {}, token provided in parameters: {}".format(token_list[idx], idx, token)
    for i in range(idx + 1, idx + context_size + 1):
        if i > len(token_list) - 1:
            # right_window.append('_END_')
            right_window.append('</s>')
        else:
            right_window.append(token_list[i])
    return right_window


class SourceWord2VecFeatureExtractor(FeatureExtractor):
    '''
    Combine a feature vector for a SOURCE ngram of arbitrary length
    from w2v vectors of all words of the ngram.
    <combination> --- method of combination of word vectors:
        - 'sum' (default)
        - 'avg'
    '''

    def __init__(self, w2v_file, combination='sum', context_size=2):
        self.model = Word2Vec.load(w2v_file)
        self.default_vector = np.average(np.array([self.model[x] for x in self.model.vocab]), axis=0).reshape((-1,))
        self.zero_vector = np.zeros(self.default_vector.shape[0])
        self.context_size = context_size
        if combination == 'sum':
            self.combine = np.sum
        elif combination == 'avg':
            self.combine = np.average
        else:
            print("Unknown combination type provided: '{}'".format(combination))

    def extract_word2vec_vector(self, token):
        if token in self.model.vocab:
            return self.model[token]
        # elif token == '_START_' or token == '_END_' or token == '_unaligned_':
        elif token == '<s>' or token == '</s>' or token == '_unaligned_':
            return self.zero_vector
        else:
            return self.default_vector

    def get_features(self, context_obj):
        if context_obj['index'][0] == context_obj['index'][1]:
            print("Invalid token indices in sentence: ", context_obj['target'])
            print("Indices: {}, {}".format(context_obj['index'][0], context_obj['index'][1]))

        left_window, right_window = [], []
        phrase_vector = []
        # source phrase exists
        if 'source_token' in context_obj and len(context_obj['source_token']) > 0:
            phrase_vector = [self.extract_word2vec_vector(tok) for tok in context_obj['source_token']]
            if type(phrase_vector) is not np.ndarray and type(phrase_vector) is not list:
                print("Phrase vector type: {}, changed after collecting word2vec vectors".format(type(phrase_vector)))
                print("Context object: ", context_obj)
                sys.exit()
            phrase_vector = self.combine(phrase_vector, axis=0)
            if type(phrase_vector) is not np.ndarray and type(phrase_vector) is not list:
                print("Phrase vector type: {}, changed after executing 'combine'".format(type(phrase_vector)))
                print("Context object: ", context_obj)
                sys.exit()
            if self.context_size > 0:
                left_window = left_context(context_obj['source'], context_obj['source_token'][0], self.context_size, context_obj['source_index'][0])
                right_window = right_context(context_obj['source'], context_obj['source_token'][-1], self.context_size, context_obj['source_index'][-1])
        # no source phrase
        else:
            alignments = []
            if 'alignments' not in context_obj:
                print("No alignment provided")
                sys.exit()
            # if 'token' contains more than 1 string, 'index' should be an interval
            if type(context_obj['token']) is list or type(context_obj['token']) is np.ndarray:
                for i in range(context_obj['index'][0], context_obj['index'][1]):
                    alignments.append(context_obj['alignments'][i])
            else:
                alignments = [context_obj['alignments'][context_obj['index']]]
            alignments = sorted(filter(lambda a: a is not None, alignments))
            if len(alignments) == 0:
                left_window = ['_unaligned_' for i in range(self.context_size)]
                right_window = ['_unaligned_' for i in range(self.context_size)]
                phrase_vector = self.extract_word2vec_vector('_unaligned_')
            elif len(alignments) > 1:
                left_window = left_context(context_obj['source'], context_obj['source'][alignments[0]], self.context_size, alignments[0])
                right_window = right_context(context_obj['source'], context_obj['source'][alignments[-1]], self.context_size, alignments[-1])
                phrase_vector = [self.extract_word2vec_vector(tok) for tok in context_obj['source'][alignments[0]:alignments[-1] + 1]]
                phrase_vector = self.combine(phrase_vector, axis=0)
            elif len(alignments) == 1:
                src_token = context_obj['source'][alignments[0]]
                left_window = left_context(context_obj['source'], src_token, self.context_size, alignments[0])
                right_window = right_context(context_obj['source'], src_token, self.context_size, alignments[0])
                phrase_vector = self.extract_word2vec_vector(src_token)
            else:
                print("Unexpected alignment configuration")

        vector = []
        for tok in left_window:
            vector.extend(self.extract_word2vec_vector(tok))
        # tmp fix if something strange happens and phrase_vector is not a list
        if type(phrase_vector) is not np.ndarray and type(phrase_vector) is not list:
            print("Phrase vector type: {}".format(type(phrase_vector)))
            print("Context object: ", context_obj)
            phrase_vector = self.zero_vector
        vector.extend(phrase_vector)
        for tok in right_window:
            vector.extend(self.extract_word2vec_vector(tok))
        return np.hstack(vector)

    # TODO: there should be a name for every feature
    def get_feature_names(self):
        return ['sw2v' + str(i) for i in range(len(self.default_vector) * (self.context_size * 2 + 1))]
6,892
47.886525
152
py
marmot
marmot-master/marmot/features/source_lm_feature_extractor.py
import codecs
from subprocess import call
import os
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context
from marmot.experiment.import_utils import mk_tmp_dir
from marmot.exceptions.no_data_error import NoDataError


# Class that extracts various LM features for source
class SourceLMFeatureExtractor(FeatureExtractor):

    def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
        # generate ngram counts
        if ngram_file is None:
            if srilm is None:
                if 'SRILM' in os.environ:
                    srilm = os.environ['SRILM']
                else:
                    print("No SRILM found")
                    return
            if corpus_file is None:
                print("No corpus for LM generation")
                return

            srilm_ngram_count = os.path.join(srilm, 'ngram-count')

            tmp_dir = mk_tmp_dir(tmp_dir)
            lm_file = os.path.join(tmp_dir, 'lm_file')
            ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
            call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])

        self.lm = defaultdict(int)
        for line in codecs.open(ngram_file, encoding='utf-8'):
            chunks = line[:-1].split('\t')
            if len(chunks) == 2:
                new_tuple = tuple(chunks[0].split())
                new_number = int(chunks[1])
                self.lm[new_tuple] = new_number
            else:
                print("Wrong ngram-counts file format at line '", line[:-1], "'")
        self.order = order

    def check_lm(self, ngram, side='left'):
        for i in range(self.order, 0, -1):
            if side == 'left':
                cur_ngram = ngram[len(ngram) - i:]
            elif side == 'right':
                cur_ngram = ngram[:i]
            else:
                print("Unknown parameter 'side'", side)
                return 0
            if tuple(cur_ngram) in self.lm:
                return i
        return 0

    # returns a set of features related to LM
    # currently extracting: highest order ngram including the word and its LEFT context,
    # highest order ngram including the word and its RIGHT context
    def get_features(self, context_obj):
        if 'source' not in context_obj:
            raise NoDataError('source', context_obj, 'SourceLMFeatureExtractor')
        if 'alignments' not in context_obj:
            raise NoDataError('alignments', context_obj, 'SourceLMFeatureExtractor')
        align_idx = context_obj['alignments'][context_obj['index']]
        # unaligned
        if align_idx is None:
            return [0, 0]
        align_token = context_obj['source'][align_idx]
        left_ngram = left_context(context_obj['source'], align_token, context_size=2, idx=align_idx) + [align_token]
        right_ngram = [align_token] + right_context(context_obj['source'], align_token, context_size=2, idx=align_idx)
        left_ngram_order = self.check_lm(left_ngram, side='left')
        right_ngram_order = self.check_lm(right_ngram, side='right')
        return [left_ngram_order, right_ngram_order]

    def get_feature_names(self):
        return ['source_highest_order_ngram_left', 'source_highest_order_ngram_right']
3,413
40.634146
119
py
marmot
marmot-master/marmot/features/wordnet_feature_extractor.py
from __future__ import print_function

import sys
from nltk import wordnet as wn
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor


class WordnetFeatureExtractor(FeatureExtractor):

    def __init__(self, src_lang, tg_lang, wordnet_pos=None):
        # there's a bug in nltk: with lang = 'eng' some words are not found, but with lang = 'en' they are,
        # and in the wordnet.langs() 'en' is not listed, only 'eng'
        if src_lang != 'en' and src_lang not in wn.wordnet.langs():
            print('The language', src_lang, 'is not supported by Open Multilingual Wordnet\n')
            self.src_lang = ''
        else:
            self.src_lang = src_lang
        if tg_lang != 'en' and tg_lang not in wn.wordnet.langs():
            print('The language', tg_lang, 'is not supported by Open Multilingual Wordnet\n')
            self.tg_lang = ''
        else:
            self.tg_lang = tg_lang

        # mapping between parts of speech returned by a POS-tagger and WordNet parts of speech:
        # wn.ADJ, wn.ADV, wn.NOUN, wn.VERB etc.
        if wordnet_pos is not None:
            self.pos_dict = defaultdict(lambda: None)
            for line in open(wordnet_pos):
                a_pair = line[:-1].decode('utf-8').split('\t')
                if len(a_pair) != 2:
                    sys.stderr.write('Incorrect format of the mapping file')
                self.pos_dict[a_pair[0]] = a_pair[1]
        else:
            self.pos_dict = None

    def get_features(self, context_obj):
        # TODO: should it throw an error when src or alignments don't exist?
        if self.src_lang == '' or 'alignments' not in context_obj or 'source' not in context_obj or context_obj['alignments'][context_obj['index']] is None:
            src_count = 0
        else:
            src_align = context_obj['alignments'][context_obj['index']]
            src_token = context_obj['source'][src_align]
            src_count = len(wn.wordnet.synsets(src_token, lang=self.src_lang))
        if self.tg_lang == '':
            tg_count = 0
        else:
            tg_count = len(wn.wordnet.synsets(context_obj['token'], lang=self.tg_lang))
        return [src_count, tg_count]

    def get_feature_names(self):
        return ["polysemy_count_source", "polysemy_count_target"]
2,330
42.166667
156
py
marmot
marmot-master/marmot/features/alignment_feature_extractor.py
from __future__ import print_function

import os
import sys
import errno

from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.alignments import train_alignments, align_sentence
from marmot.util.ngram_window_extractor import left_context, right_context
from marmot.exceptions.no_data_error import NoDataError


# all features that require source dictionary
class AlignmentFeatureExtractor(FeatureExtractor):

    def __init__(self, align_model='', src_file='', tg_file='', tmp_dir=None, context_size=1):
        if tmp_dir is None:
            tmp_dir = os.getcwd()
        try:
            os.makedirs(tmp_dir)
        except OSError as exc:  # Python >2.5
            if exc.errno == errno.EEXIST and os.path.isdir(tmp_dir):
                pass
            else:
                raise
        self.tmp_dir = tmp_dir

        self.model = ''
        # no alignment model
        if align_model == '':
            # if src_file and tg_file are not empty, it means that an alignment model needs to be trained
            # (self.model doesn't have to be defined, if context objects have alignments)
            if os.path.isfile(src_file) and os.path.isfile(tg_file):
                self.model = train_alignments(src_file, tg_file, self.tmp_dir)
        else:
            self.model = align_model
        self.context_size = context_size

    def get_features(self, context_obj):
        if 'source' not in context_obj or context_obj['source'] is None:
            raise NoDataError('source', context_obj, 'AlignmentFeatureExtractor')
        if 'target' not in context_obj or context_obj['source'] is None or context_obj['target'] is None:
            raise NoDataError('target', context_obj, 'AlignmentFeatureExtractor')
        if 'alignments' not in context_obj:
            raise NoDataError('alignments', context_obj, 'AlignmentFeatureExtractor')
        #     if self.model == '':
        #         raise NoDataError('alignments', context_obj, 'AlignmentFeatureExtractor')
        #     context_obj['alignments'] = align_sentence(context_obj['source'], context_obj['target'], self.model)

        # source word(s)
        try:
            align_idx = context_obj['alignments'][context_obj['index']]
        except IndexError:
            print("{} items in the alignment, needed {}-th".format(len(context_obj['alignments']), context_obj['index']))
            print(context_obj['alignments'], context_obj['target'], context_obj['source'])
            sys.exit()
        # if word is unaligned - no source and no source contexts
        if align_idx is None:
            return ['__unaligned__',
                    '|'.join(['__unaligned__' for i in range(self.context_size)]),
                    '|'.join(['__unaligned__' for i in range(self.context_size)])]
        # TODO: find contexts for all words aligned to the token (now only 1st word)
        else:
            left = '|'.join(left_context(context_obj['source'], context_obj['source'][align_idx], context_size=self.context_size, idx=align_idx))
            right = '|'.join(right_context(context_obj['source'], context_obj['source'][align_idx], context_size=self.context_size, idx=align_idx))
            aligned_to = context_obj['source'][align_idx]
        return [aligned_to, left, right]

    def get_feature_names(self):
        return ['aligned_token', 'src_left_context', 'src_right_context']
3,353
44.945205
162
py
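The shape of a context object the alignment extractor above expects, with alignments already present so no alignment model is trained (a sketch; assumes the marmot package is importable, and the sentences are made up):

extractor = AlignmentFeatureExtractor(context_size=1)
context_obj = {
    'token': 'Katze',
    'index': 1,
    'target': ['die', 'Katze', 'sitzt'],
    'source': ['the', 'cat', 'sits'],
    'alignments': [0, 1, 2],  # one source index (or None) per target token
}
print(extractor.get_features(context_obj))  # ['cat', 'the', 'sits']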
marmot
marmot-master/marmot/features/unbabel/bias_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor


class BiasFeatureExtractor(FeatureExtractor):

    def get_features(self, context_obj):
        return [1]

    def get_feature_names(self):
        return ['bias']
230
20
62
py
marmot
marmot-master/marmot/features/unbabel/paired_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context


class PairedFeatureExtractor(FeatureExtractor):
    '''
    Paired features:
    - target token + left context
    - target token + right context
    - target token + source token
    - target POS + source POS
    '''

    def get_features(self, context_obj):
        token = context_obj['token']
        left = ' '.join(left_context(context_obj['target'], token, context_size=1, idx=context_obj['index']))
        right = ' '.join(right_context(context_obj['target'], token, context_size=1, idx=context_obj['index']))
        tg_pos = context_obj['target_pos'][context_obj['index']] if context_obj['target_pos'] != [] else ''
        align_idx = context_obj['alignments'][context_obj['index']]
        if align_idx is None:
            src_token = '__unaligned__'
            src_pos = '__unaligned__'
        else:
            src_token = context_obj['source'][align_idx]
            src_pos = context_obj['source_pos'][align_idx]
        return [token + '|' + left, token + '|' + right, token + '|' + src_token, tg_pos + '|' + src_pos]

    def get_feature_names(self):
        return ['token+left', 'token+right', 'token+source', 'POS+sourcePOS']
1,309
39.9375
111
py
marmot
marmot-master/marmot/features/unbabel/trilexical_left_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context


class TrilexicalLeftFeatureExtractor(FeatureExtractor):
    '''
    Trilexical features:
    - target token + left context + source token
    '''

    def get_features(self, context_obj):
        token = context_obj['token']
        left = ' '.join(left_context(context_obj['target'], token, context_size=1, idx=context_obj['index']))
        align_idx = context_obj['alignments'][context_obj['index']]
        if align_idx is None:
            aligned_to = '__unaligned__'
        else:
            aligned_to = context_obj['source'][align_idx]
        return [token + '|' + left + '|' + aligned_to]

    def get_feature_names(self):
        return ['target+left+source']
858
32.038462
109
py
marmot
marmot-master/marmot/features/unbabel/paired_left_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context


class PairedLeftFeatureExtractor(FeatureExtractor):
    '''
    Paired features:
    - target token + left context
    - target token + source token
    - target POS + source POS
    '''

    def get_features(self, context_obj):
        token = context_obj['token']
        left = ' '.join(left_context(context_obj['target'], token, context_size=1, idx=context_obj['index']))
        tg_pos = context_obj['target_pos'][context_obj['index']] if context_obj['target_pos'] != [] else ''
        align_idx = context_obj['alignments'][context_obj['index']]
        if align_idx is None:
            src_token = '__unaligned__'
            src_pos = '__unaligned__'
        else:
            src_token = context_obj['source'][align_idx]
            src_pos = context_obj['source_pos'][align_idx]
        return [token + '|' + left, token + '|' + src_token, tg_pos + '|' + src_pos]

    def get_feature_names(self):
        return ['token+left', 'token+source', 'POS+sourcePOS']
1,165
36.612903
109
py
marmot
marmot-master/marmot/features/unbabel/token_count_feature_extractor.py
from __future__ import print_function, division

import numpy as np
from marmot.features.feature_extractor import FeatureExtractor

import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')


class TokenCountFeatureExtractor(FeatureExtractor):

    # extract token-count features for the source and target sides of the context object
    def get_features(self, context_obj):
        feature_funcs = [self.source_count, self.target_count, self.source_target_ratio]
        return np.array([f(context_obj) for f in feature_funcs])

    def source_count(self, context_obj):
        if 'source' in context_obj and type(context_obj['source']) == list:
            return float(len(context_obj['source']))
        logger.warn('you are trying to extract the source token count from a context object without a "source" field')
        return 0.0

    def target_count(self, context_obj):
        if 'target' in context_obj and type(context_obj['target']) == list:
            return float(len(context_obj['target']))
        logger.warn('you are trying to extract the target token count from a context object without a "target" field')
        return 0.0

    def source_target_ratio(self, context_obj):
        s_count = self.source_count(context_obj)
        t_count = self.target_count(context_obj)
        return s_count / t_count

    def get_feature_names(self):
        return ['source_token_count', 'target_token_count', 'source_target_token_count_ratio']
1,542
38.564103
118
py
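A quick sanity check of the three counts on a toy context object (hypothetical values, not from the repository):

context_obj = {'source': ['a', 'b', 'c'], 'target': ['w', 'x', 'y', 'z']}
extractor = TokenCountFeatureExtractor()
print(extractor.get_features(context_obj))  # -> array([3., 4., 0.75])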
marmot
marmot-master/marmot/features/unbabel/trilexical_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context


class TrilexicalFeatureExtractor(FeatureExtractor):
    '''
    Trilexical features:
      - target token + left context + source token
      - target token + right context + source token
    '''

    def get_features(self, context_obj):
        token = context_obj['token']
        left = ' '.join(left_context(context_obj['target'], token, context_size=1, idx=context_obj['index']))
        right = ' '.join(right_context(context_obj['target'], token, context_size=1, idx=context_obj['index']))
        align_idx = context_obj['alignments'][context_obj['index']]
        if align_idx is None:
            aligned_to = '__unaligned__'
        else:
            aligned_to = context_obj['source'][align_idx]
        return [token + '|' + left + '|' + aligned_to,
                token + '|' + right + '|' + aligned_to]

    def get_feature_names(self):
        return ['target+left+source', 'target+right+source']
1,044
37.703704
111
py
marmot
marmot-master/marmot/features/unbabel/__init__.py
0
0
0
py
marmot
marmot-master/marmot/features/phrase/num_translations_feature_extractor.py
from __future__ import division

import numpy as np
from collections import defaultdict
from nltk import FreqDist
from gensim.corpora import TextCorpus

from marmot.features.feature_extractor import FeatureExtractor


class NumTranslationsFeatureExtractor(FeatureExtractor):

    # the lexical probability table is the .f2e file
    def __init__(self, lex_prob_file, corpus_file):
        self.lex_prob = defaultdict(list)
        for line in open(lex_prob_file):
            chunks = line[:-1].split()
            self.lex_prob[chunks[1]].append(float(chunks[2]))
        corpus = TextCorpus(input=corpus_file)
        self.corpus_freq = FreqDist([word for line in corpus.get_texts() for word in line])
        self.thresholds = [0.01, 0.05, 0.1, 0.2, 0.5]

    def get_features(self, context_obj):
        if 'source_token' not in context_obj or len(context_obj['source_token']) == 0:
            return ['0.0' for i in range(len(self.thresholds)*2)]
        translations, translations_weighted = [], []
        for thr in self.thresholds:
            all_words, all_words_weighted = [], []
            for word in context_obj['source_token']:
                trans = [fl for fl in self.lex_prob[word] if fl >= thr]
                all_words.append(len(trans))
                all_words_weighted.append(len(trans)*self.corpus_freq.freq(word))
            translations.append(np.average(all_words))
            translations_weighted.append(np.average(all_words_weighted))
        return [str(t) for t in translations] + [str(t) for t in translations_weighted]

    def get_feature_names(self):
        return ['source_translations_001_freq',
                'source_translations_005_freq',
                'source_translations_01_freq',
                'source_translations_02_freq',
                'source_translations_05_freq',
                'source_translations_001_freq_weighted',
                'source_translations_005_freq_weighted',
                'source_translations_01_freq_weighted',
                'source_translations_02_freq_weighted',
                'source_translations_05_freq_weighted']
2,228
41.865385
91
py
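The lexical-table format is not documented here; from the parsing in __init__ it appears to be whitespace-separated lines whose second field is the source word and whose third field is a translation probability. A hypothetical worked example:

# Suppose the .f2e file contains (hypothetical lines):
#   house casa 0.6
#   home  casa 0.3
#   box   casa 0.04
# Then lex_prob['casa'] == [0.6, 0.3, 0.04], and for the one-word source
# phrase ['casa'] the unweighted counts at thresholds 0.01/0.05/0.1/0.2/0.5
# are 3, 2, 2, 2, 1; the weighted variant multiplies each count by the
# word's relative frequency in the corpus (corpus_freq.freq('casa')).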
marmot
marmot-master/marmot/features/phrase/nouns_bigram_feature_extractor.py
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor


def get_nouns(language):
    nouns = defaultdict(list)
    nouns['english'] = ['NN']
    nouns['spanish'] = ['NC', 'NMEA', 'NMON', 'NP']
    return nouns[language]


class NounsBigramFeatureExtractor(FeatureExtractor):
    '''
    Number of nouns in source and target:
    <source_number>_<target_number>
    '''

    def __init__(self, lang='english'):
        self.nouns = get_nouns(lang)

    def is_noun(self, word):
        for n in self.nouns:
            if word.startswith(n):
                return True
        return False

    def get_feature(self, context_obj):
        source_idx = context_obj['source_index']
        target_idx = context_obj['index']
        source_nouns, target_nouns = 0, 0
        for w in context_obj['target_pos'][target_idx[0]:target_idx[1]]:
            if self.is_noun(w):
                target_nouns += 1
        for w in context_obj['source_pos'][source_idx[0]:source_idx[1]]:
            if self.is_noun(w):
                source_nouns += 1
        feature_val = str(source_nouns) + "_" + str(target_nouns)
        return feature_val

    def get_feature_name(self):
        return "source_target_nouns_numbers"

    def get_features(self, context_obj):
        return [self.get_feature(context_obj)]

    def get_feature_names(self):
        return [self.get_feature_name()]
1,434
28.285714
72
py
marmot
marmot-master/marmot/features/phrase/pos_context_left_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
from marmot.util.ngram_window_extractor import left_context, right_context


class POSContextLeftFeatureExtractor(FeatureExtractor):
    '''
    Same as POSContextFeatureExtractor, but without the right target context
    '''

    def get_features(self, context_obj):
        if 'target_pos' not in context_obj:
            raise NoDataError('target_pos', context_obj, 'POSContextLeftFeatureExtractor')
        if 'source_pos' not in context_obj:
            raise NoDataError('source_pos', context_obj, 'POSContextLeftFeatureExtractor')

        left_src = left_context(context_obj['source_pos'], context_obj['source_pos'][context_obj['source_index'][0]], context_size=1, idx=context_obj['source_index'][0])
        right_src = right_context(context_obj['source_pos'], context_obj['source_pos'][context_obj['source_index'][1]-1], context_size=1, idx=context_obj['source_index'][1]-1)
        left_tg = left_context(context_obj['target_pos'], context_obj['target_pos'][context_obj['index'][0]], context_size=1, idx=context_obj['index'][0])

        return [left_src[0], right_src[0], left_tg[0]]

    def get_feature_names(self):
        return ['left_source_context_pos', 'right_source_context_pos', 'left_target_context_pos']
1,329
50.153846
175
py
marmot
marmot-master/marmot/features/phrase/ngram_frequencies_feature_extractor.py
from __future__ import division

import os
import sys
import codecs
from subprocess import call
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor


class NgramFrequenciesFeatureExtractor(FeatureExtractor):

    def __init__(self, tmp_dir, ngram_count_file=None, corpus=None, srilm=None):
        if srilm is None:
            srilm = os.environ['SRILM'] if 'SRILM' in os.environ else None
        if ngram_count_file is None:
            if corpus is None:
                print("No ngram count file and no corpus provided")
                sys.exit()
            if not os.path.exists(corpus):
                print("Corpus doesn't exist")
                sys.exit()
            if srilm is None:
                print("No SRILM found, ngram counts can't be extracted")
                sys.exit()
            ngram_count_binary = os.path.join(srilm, 'ngram-count')
            if not os.path.exists(ngram_count_binary):
                print("No SRILM found, ngram counts can't be extracted")
                sys.exit()
            # run SRILM to get the ngram counts
            ngram_count_file = os.path.join(tmp_dir, 'tst_counts')
            call([ngram_count_binary, '-order', '3', '-text', corpus, '-write', ngram_count_file])

        # get ngram counts
        ngrams = defaultdict(list)
        for line in codecs.open(ngram_count_file, encoding='utf-8'):
            chunks = line[:-1].split('\t')
            if len(chunks) != 2:
                print("Wrong format of the ngram file '{}', bad line: {}".format(ngram_count_file, line))
                sys.exit()
            words = chunks[0].split()
            ngrams[len(words)].append((chunks[0], int(chunks[1])))

        self.ngrams = {}
        self.ngram_quartiles = {}
        for order in ngrams:
            sorted_ngrams = sorted(ngrams[order], key=lambda kv: kv[1])
            self.ngrams[order] = {i: j for (i, j) in sorted_ngrams}
            ngrams_len = len(sorted_ngrams)
            q1, q2, q3 = int(ngrams_len/4), int(ngrams_len/2), int(ngrams_len*3/4)
            # quartile 1 -- low frequency, quartile 4 -- high frequency
            self.ngram_quartiles[order] = {1: {i: j for (i, j) in sorted_ngrams[:q1]},
                                           2: {i: j for (i, j) in sorted_ngrams[q1:q2]},
                                           3: {i: j for (i, j) in sorted_ngrams[q2:q3]},
                                           4: {i: j for (i, j) in sorted_ngrams[q3:]}}

    def get_quartiles_frequency(self, order, source_token):
        quart_frequencies = []
        ngram_list = [' '.join(source_token[i:i+order]) for i in range(len(source_token) - order + 1)]
        if len(ngram_list) == 0:
            return [0.0, 0.0, 0.0, 0.0]
        for quart in [1, 2, 3, 4]:
            quart_count = 0
            for ngram in ngram_list:
                if ngram in self.ngram_quartiles[order][quart]:
                    quart_count += 1
            quart_frequencies.append(quart_count/len(ngram_list))
        return quart_frequencies

    def get_features(self, context_obj):
        if len(context_obj['source_token']) == 0:
            return ['0' for i in range(15)]
        source_token = context_obj['source_token']
        unigram_quart = self.get_quartiles_frequency(1, source_token)
        bigram_quart = self.get_quartiles_frequency(2, source_token)
        trigram_quart = self.get_quartiles_frequency(3, source_token)
        bigram_list = [' '.join(source_token[i:i+2]) for i in range(len(source_token) - 1)]
        trigram_list = [' '.join(source_token[i:i+3]) for i in range(len(source_token) - 2)]
        percent_unigram = sum([1 for word in source_token if word in self.ngrams[1]])/len(source_token)
        percent_bigram = sum([1 for word in bigram_list if word in self.ngrams[2]])/len(source_token)
        percent_trigram = sum([1 for word in trigram_list if word in self.ngrams[3]])/len(source_token)
        return [str(n) for n in unigram_quart] + [str(n) for n in bigram_quart] + [str(n) for n in trigram_quart] + [str(percent_unigram), str(percent_bigram), str(percent_trigram)]

    def get_feature_names(self):
        return ['avg_unigram_quart_1', 'avg_unigram_quart_2', 'avg_unigram_quart_3', 'avg_unigram_quart_4',
                'avg_bigram_quart_1', 'avg_bigram_quart_2', 'avg_bigram_quart_3', 'avg_bigram_quart_4',
                'avg_trigram_quart_1', 'avg_trigram_quart_2', 'avg_trigram_quart_3', 'avg_trigram_quart_4',
                'percent_unigram', 'percent_bigram', 'percent_trigram']
4,810
46.633663
181
py
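A hypothetical worked example of the quartile shares computed above (quartile 1 holds the rarest ngrams, quartile 4 the most frequent):

# For order=1 and the source phrase ['the', 'valve', 'xfoo'], suppose the
# count file puts 'the' in quartile 4 and 'valve' in quartile 1, while
# 'xfoo' is unseen. get_quartiles_frequency(1, ...) then returns
#   [1/3, 0.0, 0.0, 1/3]
# Note the shares need not sum to 1: out-of-vocabulary ngrams fall into
# no quartile at all.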
marmot
marmot-master/marmot/features/phrase/pos_feature_extractor.py
from __future__ import division

from collections import defaultdict
import logging

from marmot.features.feature_extractor import FeatureExtractor

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')


# collections of content tags for some languages
def get_tags(lang):
    content, verbs, nouns, pronouns = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)
    content['english'] = ['JJ', 'RB', 'NN', 'VB']
    content['spanish'] = ['ADJ', 'ADV', 'NC', 'NMEA', 'NMON', 'NP', 'VL']
    content['german'] = ['ADJ', 'ADV', 'NN', 'NE', 'VA', 'VV', 'VM']
    verbs['english'] = ['VB']
    verbs['spanish'] = ['VL']
    verbs['german'] = ['VA', 'VV', 'VM']
    nouns['english'] = ['NN']
    nouns['spanish'] = ['NC', 'NMEA', 'NMON', 'NP']
    nouns['german'] = ['NN', 'NE']
    pronouns['english'] = ['PP', 'WP$']
    pronouns['spanish'] = ['DM', 'INT', 'PP', 'REL']
    pronouns['german'] = ['PPOS', 'PD', 'PI', 'PREL', 'PW', 'PAV']
    return content[lang], nouns[lang], verbs[lang], pronouns[lang]


class POSFeatureExtractor(FeatureExtractor):

    def __init__(self, lang_src, lang_tg):
        self.content_src, self.nouns_src, self.verbs_src, self.pronouns_src = get_tags(lang_src)
        self.content_tg, self.nouns_tg, self.verbs_tg, self.pronouns_tg = get_tags(lang_tg)
        if len(self.content_src) == 0:
            logger.warn("No POS lists for the language {}".format(lang_src))
        if len(self.content_tg) == 0:
            logger.warn("No POS lists for the language {}".format(lang_tg))

    def belongs_to(self, word_tag, category):
        for tag in category:
            if word_tag.startswith(tag):
                return True
        return False

    def get_features(self, context_obj):
        if len(self.content_src) == 0 or len(self.content_tg) == 0:
            logger.warn("One or more POS lists are empty")

        content_src, content_tg, verbs_src, verbs_tg, nouns_src, nouns_tg, pronouns_src, pronouns_tg = 0, 0, 0, 0, 0, 0, 0, 0
        source_idx = context_obj['source_index']
        target_idx = context_obj['index']

        # check if source words are nouns, verbs, content words
        if len(source_idx) > 0:
            for word in context_obj['source_pos'][source_idx[0]:source_idx[1]]:
                content = False
                if self.belongs_to(word, self.pronouns_src):
                    pronouns_src += 1
                if self.belongs_to(word, self.nouns_src):
                    nouns_src += 1
                    if not content:
                        content_src += 1
                        content = True
                if self.belongs_to(word, self.verbs_src):
                    verbs_src += 1
                    if not content:
                        content_src += 1
                        content = True
                if not content:
                    if self.belongs_to(word, self.content_src):
                        content_src += 1

        # check if target words are nouns, verbs, content words
        for word in context_obj['target_pos'][target_idx[0]:target_idx[1]]:
            content = False
            if self.belongs_to(word, self.pronouns_tg):
                pronouns_tg += 1
            if self.belongs_to(word, self.nouns_tg):
                nouns_tg += 1
                if not content:
                    content_tg += 1
                    content = True
            if self.belongs_to(word, self.verbs_tg):
                verbs_tg += 1
                if not content:
                    content_tg += 1
                    content = True
            if not content:
                if self.belongs_to(word, self.content_tg):
                    content_tg += 1

        content_tg_percent = content_tg/len(context_obj['token'])
        verbs_tg_percent = verbs_tg/len(context_obj['token'])
        nouns_tg_percent = nouns_tg/len(context_obj['token'])
        pronouns_tg_percent = pronouns_tg/len(context_obj['token'])
        content_src_percent = 0
        verbs_src_percent = 0
        nouns_src_percent = 0
        pronouns_src_percent = 0
        if len(context_obj['source_token']) > 0:
            content_src_percent = content_src/len(context_obj['source_token'])
            verbs_src_percent = verbs_src/len(context_obj['source_token'])
            nouns_src_percent = nouns_src/len(context_obj['source_token'])
            pronouns_src_percent = pronouns_src/len(context_obj['source_token'])

        ratio_content = content_src_percent/content_tg_percent if content_tg_percent > 0 else (1 if content_src_percent == 0 else 0)
        ratio_verbs = verbs_src_percent/verbs_tg_percent if verbs_tg_percent > 0 else (1 if verbs_src_percent == 0 else 0)
        ratio_nouns = nouns_src_percent/nouns_tg_percent if nouns_tg_percent > 0 else (1 if nouns_src_percent == 0 else 0)
        ratio_pronouns = pronouns_src_percent/pronouns_tg_percent if pronouns_tg_percent > 0 else (1 if pronouns_src_percent == 0 else 0)

        return [str(content_src_percent), str(content_tg_percent),
                str(verbs_src_percent), str(verbs_tg_percent),
                str(nouns_src_percent), str(nouns_tg_percent),
                str(pronouns_src_percent), str(pronouns_tg_percent),
                str(ratio_content), str(ratio_verbs), str(ratio_nouns), str(ratio_pronouns)]

    def get_feature_names(self):
        return ['percentage_content_words_src', 'percentage_content_words_tg',
                'percentage_verbs_src', 'percentage_verbs_tg',
                'percentage_nouns_src', 'percentage_nouns_tg',
                'percentage_pronouns_src', 'percentage_pronouns_tg',
                'ratio_content_words_src_tg', 'ratio_verbs_src_tg',
                'ratio_nouns_src_tg', 'ratio_pronouns_src_tg']
6,043
44.104478
137
py
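The zero-denominator fallback used for all four ratio features above is easy to miss; a minimal sketch with hypothetical shares:

def ratio(src_percent, tg_percent):
    # same expression as in get_features above
    return src_percent/tg_percent if tg_percent > 0 else (1 if src_percent == 0 else 0)

print(ratio(0.5, 0.25))  # 2.0 -- twice as many content words on the source side
print(ratio(0.0, 0.0))   # 1  -- neither side has any, treated as agreement
print(ratio(0.5, 0.0))   # 0  -- content words on the source side only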
marmot
marmot-master/marmot/features/phrase/context_left_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context


class ContextLeftFeatureExtractor(FeatureExtractor):
    '''
    Same as ContextFeatureExtractor, but without the right target context
    '''

    def get_features(self, context_obj):
        if 'source_token' in context_obj and len(context_obj['source_token']) > 0:
            left_src = left_context(context_obj['source'], context_obj['source_token'][0], context_size=1, idx=context_obj['source_index'][0])
            right_src = right_context(context_obj['source'], context_obj['source_token'][-1], context_size=1, idx=context_obj['source_index'][1]-1)
        else:
            # placeholder lists so that indexing with [0] below stays safe
            left_src = ['']
            right_src = ['']
        left_tg = left_context(context_obj['target'], context_obj['token'][0], context_size=1, idx=context_obj['index'][0])
        return [left_src[0], right_src[0], left_tg[0]]

    def get_feature_names(self):
        return ['left_source_context', 'right_source_context', 'left_target_context']
1,163
43.769231
147
py
marmot
marmot-master/marmot/features/phrase/oov_feature_extractor.py
from gensim.corpora import TextCorpus

from marmot.features.feature_extractor import FeatureExtractor


class OOVFeatureExtractor(FeatureExtractor):
    '''
    Feature that indicates the presence of OOV words in the source phrase.
    Values:
      0 -- no OOV words
      1 -- 1 or more OOV words
    '''

    def __init__(self, corpus_file):
        corpus = TextCorpus(input=corpus_file)
        # a set makes the per-word membership test O(1) instead of a list scan
        self.words = set(corpus.dictionary.values())

    def get_features(self, context_obj):
        # no source -- no OOVs
        if 'source_token' not in context_obj or len(context_obj['source_token']) == 0:
            return ['0']
        for word in context_obj['source_token']:
            if word not in self.words:
                return ['1']
        return ['0']

    def get_feature_names(self):
        return ['OOV_words']
1,079
29.857143
86
py
marmot
marmot-master/marmot/features/phrase/context_lm_left_feature_extractor.py
import os
import codecs
from subprocess import call
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context
from marmot.experiment.import_utils import mk_tmp_dir


class ContextLMLeftFeatureExtractor(FeatureExtractor):
    '''
    Same as ContextLMFeatureExtractor, but without the right context
    '''

    def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
        # generate ngram counts if no pre-computed file is provided
        if ngram_file is None:
            if srilm is None:
                if 'SRILM' in os.environ:
                    srilm = os.environ['SRILM']
                else:
                    print("No SRILM found")
                    return
            if corpus_file is None:
                print("No corpus for LM generation")
                return

            srilm_ngram_count = os.path.join(srilm, 'ngram-count')
            tmp_dir = mk_tmp_dir(tmp_dir)
            lm_file = os.path.join(tmp_dir, 'lm_file')
            ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
            call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])

        self.lm = defaultdict(int)
        for line in codecs.open(ngram_file, encoding='utf-8'):
            chunks = line[:-1].split('\t')
            if len(chunks) == 2:
                new_tuple = tuple(chunks[0].split())
                new_number = int(chunks[1])
                self.lm[new_tuple] = new_number
            else:
                print("Wrong ngram-counts file format at line '", line[:-1], "'")
        self.order = order

    def check_lm(self, ngram, side='left'):
        # length of the longest sub-ngram, anchored at the given side, known to the LM
        for i in range(self.order, 0, -1):
            if side == 'left':
                cur_ngram = ngram[len(ngram)-i:]
            elif side == 'right':
                cur_ngram = ngram[:i]
            else:
                print("Unknown parameter 'side'", side)
                return 0
            if tuple(cur_ngram) in self.lm:
                return i
        return 0

    def get_backoff(self, ngram):
        assert(len(ngram) == 3)
        ngram = tuple(ngram)
        if ngram in self.lm:                                  # trigram (1, 2, 3)
            return 1.0
        elif ngram[:2] in self.lm and ngram[1:] in self.lm:   # bigrams (1, 2) and (2, 3)
            return 0.8
        elif ngram[1:] in self.lm:                            # bigram (2, 3)
            return 0.6
        elif ngram[:2] in self.lm and ngram[2:] in self.lm:   # bigram (1, 2) and unigram (3)
            return 0.4
        elif ngram[1:2] in self.lm and ngram[2:] in self.lm:  # unigrams (2) and (3)
            return 0.3
        elif ngram[2:] in self.lm:                            # unigram (3)
            return 0.2
        else:                                                 # all words unknown
            return 0.1

    def get_features(self, context_obj):
        idx_left = context_obj['index'][0]
        left_ngram = left_context(context_obj['target'], context_obj['token'][0], context_size=self.order-1, idx=idx_left) + [context_obj['token'][0]]
        left_ngram_order = self.check_lm(left_ngram, side='left')
        left_trigram = left_context(context_obj['target'], context_obj['token'][0], context_size=2, idx=idx_left) + [context_obj['token'][0]]
        backoff_left = self.get_backoff(left_trigram)
        return [str(left_ngram_order), str(backoff_left)]

    def get_feature_names(self):
        return ['highest_order_ngram_left', 'backoff_behavior_left']
3,745
35.368932
150
py
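A hypothetical harness (not from the repository) that exercises get_backoff without running SRILM, by bypassing __init__ and injecting a toy count table:

ex = ContextLMLeftFeatureExtractor.__new__(ContextLMLeftFeatureExtractor)
ex.order = 5
ex.lm = {('the', 'red'): 7, ('red', 'car'): 3}
print(ex.get_backoff(['the', 'red', 'car']))  # 0.8 -- both bigrams known, trigram unseen
print(ex.get_backoff(['a', 'blue', 'car']))   # 0.1 -- nothing in the table matches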
marmot
marmot-master/marmot/features/phrase/context_lm_feature_extractor.py
import os
import codecs
from subprocess import call
from collections import defaultdict

from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context
from marmot.experiment.import_utils import mk_tmp_dir


class ContextLMFeatureExtractor(FeatureExtractor):
    '''
    LM context features: length of the longest n-gram known to the LM on
    each side of the phrase, and trigram backoff behaviour on each side
    '''

    def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
        # generate ngram counts if no pre-computed file is provided
        if ngram_file is None:
            if srilm is None:
                if 'SRILM' in os.environ:
                    srilm = os.environ['SRILM']
                else:
                    print("No SRILM found")
                    return
            if corpus_file is None:
                print("No corpus for LM generation")
                return

            srilm_ngram_count = os.path.join(srilm, 'ngram-count')
            tmp_dir = mk_tmp_dir(tmp_dir)
            lm_file = os.path.join(tmp_dir, 'lm_file')
            ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
            call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])

        self.lm = defaultdict(int)
        for line in codecs.open(ngram_file, encoding='utf-8'):
            chunks = line[:-1].split('\t')
            if len(chunks) == 2:
                new_tuple = tuple(chunks[0].split())
                new_number = int(chunks[1])
                self.lm[new_tuple] = new_number
            else:
                print("Wrong ngram-counts file format at line '", line[:-1], "'")
        self.order = order

    def check_lm(self, ngram, side='left'):
        # length of the longest sub-ngram, anchored at the given side, known to the LM
        for i in range(self.order, 0, -1):
            if side == 'left':
                cur_ngram = ngram[len(ngram)-i:]
            elif side == 'right':
                cur_ngram = ngram[:i]
            else:
                print("Unknown parameter 'side'", side)
                return 0
            if tuple(cur_ngram) in self.lm:
                return i
        return 0

    def get_backoff(self, ngram):
        assert(len(ngram) == 3)
        ngram = tuple(ngram)
        if ngram in self.lm:                                  # trigram (1, 2, 3)
            return 1.0
        elif ngram[:2] in self.lm and ngram[1:] in self.lm:   # bigrams (1, 2) and (2, 3)
            return 0.8
        elif ngram[1:] in self.lm:                            # bigram (2, 3)
            return 0.6
        elif ngram[:2] in self.lm and ngram[2:] in self.lm:   # bigram (1, 2) and unigram (3)
            return 0.4
        elif ngram[1:2] in self.lm and ngram[2:] in self.lm:  # unigrams (2) and (3)
            return 0.3
        elif ngram[2:] in self.lm:                            # unigram (3)
            return 0.2
        else:                                                 # all words unknown
            return 0.1

    def get_features(self, context_obj):
        idx_left = context_obj['index'][0]
        idx_right = context_obj['index'][1]
        left_ngram = left_context(context_obj['target'], context_obj['token'][0], context_size=self.order-1, idx=idx_left) + [context_obj['token'][0]]
        right_ngram = [context_obj['token'][-1]] + right_context(context_obj['target'], context_obj['token'][-1], context_size=self.order-1, idx=idx_right)
        left_ngram_order = self.check_lm(left_ngram, side='left')
        right_ngram_order = self.check_lm(right_ngram, side='right')

        left_trigram = left_context(context_obj['target'], context_obj['token'][0], context_size=2, idx=idx_left) + [context_obj['token'][0]]
        right_trigram = [context_obj['token'][-1]] + right_context(context_obj['target'], context_obj['token'][-1], context_size=2, idx=idx_right)
        backoff_left = self.get_backoff(left_trigram)
        backoff_right = self.get_backoff(right_trigram)

        return [str(left_ngram_order), str(right_ngram_order), str(backoff_left), str(backoff_right)]

    def get_feature_names(self):
        return ['highest_order_ngram_left', 'highest_order_ngram_right', 'backoff_behavior_left', 'backoff_behavior_right']
4,057
38.398058
155
py
marmot
marmot-master/marmot/features/phrase/ne_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor


class NEFeatureExtractor(FeatureExtractor):
    '''
    Presence/absence of named entities in source and target phrases.
    Named entity = word with a capital first letter
    '''

    def get_features(self, context_obj):
        src_ne, tg_ne = 0, 0
        for word in context_obj['token']:
            if word[0].isupper():
                tg_ne = 1
        # guard against context objects with no source side
        for word in context_obj.get('source_token', []):
            if word[0].isupper():
                src_ne = 1
        return [str(src_ne), str(tg_ne)]

    def get_feature_names(self):
        return ['named_entity_source', 'named_entity_target']
786
30.48
68
py
marmot
marmot-master/marmot/features/phrase/pos_context_feature_extractor.py
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
from marmot.util.ngram_window_extractor import left_context, right_context


class POSContextFeatureExtractor(FeatureExtractor):

    def get_features(self, context_obj):
        if 'target_pos' not in context_obj:
            raise NoDataError('target_pos', context_obj, 'POSContextFeatureExtractor')
        if 'source_pos' not in context_obj:
            raise NoDataError('source_pos', context_obj, 'POSContextFeatureExtractor')

        left_src = left_context(context_obj['source_pos'], context_obj['source_pos'][context_obj['source_index'][0]], context_size=1, idx=context_obj['source_index'][0])
        right_src = right_context(context_obj['source_pos'], context_obj['source_pos'][context_obj['source_index'][1]-1], context_size=1, idx=context_obj['source_index'][1]-1)
        left_tg = left_context(context_obj['target_pos'], context_obj['target_pos'][context_obj['index'][0]], context_size=1, idx=context_obj['index'][0])
        right_tg = right_context(context_obj['target_pos'], context_obj['target_pos'][context_obj['index'][1]-1], context_size=1, idx=context_obj['index'][1]-1)

        return [left_src[0], right_src[0], left_tg[0], right_tg[0]]

    def get_feature_names(self):
        return ['left_source_context_pos', 'right_source_context_pos', 'left_target_context_pos', 'right_target_context_pos']
1,445
59.25
175
py
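Most extractors in this section lean on left_context/right_context from marmot.util.ngram_window_extractor; the following is a minimal reimplementation sketch of their assumed behaviour (the padding symbols, and the fact that the token argument goes unused here, are guesses rather than the library's actual code):

def left_context(sentence, token, context_size=1, idx=0):
    # tokens to the left of position idx, front-padded so callers can
    # always index [0] safely
    window = sentence[max(0, idx - context_size):idx]
    return ['_START_'] * (context_size - len(window)) + window

def right_context(sentence, token, context_size=1, idx=0):
    # tokens to the right of position idx, back-padded symmetrically
    window = sentence[idx + 1:idx + 1 + context_size]
    return window + ['_END_'] * (context_size - len(window))

print(left_context(['the', 'red', 'car'], 'the', idx=0))   # ['_START_']
print(right_context(['the', 'red', 'car'], 'red', idx=1))  # ['car']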