Dataset schema (per-row fields):
- code: string (66 to 870k chars)
- docstring: string (19 to 26.7k chars)
- func_name: string (1 to 138 chars)
- language: string (1 class)
- repo: string (7 to 68 chars)
- path: string (5 to 324 chars)
- url: string (46 to 389 chars)
- license: string (7 classes)
def __init__(self, hid_dim, n_heads, dropout=0.1, device=None):
    '''
    :param hid_dim: number of features, i.e., input/output dimensionality
    :param n_heads: number of heads
    :param dropout: dropout probability
    '''
    super(MultiheadAttention, self).__init__()
    self.hid_dim = hid_dim
    self.n_heads = n_heads
    assert hid_dim % n_heads == 0
    # W_q matrix
    self.w_q = nn.Linear(hid_dim, hid_dim)
    # W_k matrix
    self.w_k = nn.Linear(hid_dim, hid_dim)
    # W_v matrix
    self.w_v = nn.Linear(hid_dim, hid_dim)
    ''' E.g., equation-10 for DASALC '''
    self.fc = nn.Linear(hid_dim, hid_dim, bias=True)
    self.do_dropout = nn.Dropout(dropout)
    # scaling
    self.scale = torch.sqrt(torch.tensor([hid_dim // n_heads], dtype=torch.float, device=device))
:param hid_dim: number of features, i.e., input/output dimensionality :param n_heads: number of heads :param dropout: dropout probability
__init__
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
def forward(self, batch_rankings):
    '''
    Forward pass through the multi-head attention block.
    :param batch_rankings: [batch_size, ranking_size, num_features]
    :return:
    '''
    bsz = batch_rankings.shape[0]
    Q = self.w_q(batch_rankings)
    K = self.w_k(batch_rankings)
    V = self.w_v(batch_rankings)
    ''' Here, split {K Q V} into multi-group attentions, thus a 4-dimensional matrix '''
    Q = Q.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
    K = K.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
    V = V.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
    '''
    step-1: Q * K^T / sqrt(d_k)
    [batch_size, n_heads, ranking_size, num_features_sub_head] * [batch_size, n_heads, num_features_sub_head, ranking_size]
    = [batch_size, n_heads, ranking_size, ranking_size]
    '''
    attention = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
    '''
    step-2: perform softmax -> dropout to get the attention weights
    [batch_size, n_heads, ranking_size, ranking_size]
    '''
    attention = self.do_dropout(torch.softmax(attention, dim=-1))
    '''
    step-3: multiply attention and V to get the results of the multiple heads
    [batch_size, n_heads, ranking_size, ranking_size] * [batch_size, n_heads, ranking_size, num_features_sub_head]
    = [batch_size, n_heads, ranking_size, num_features_sub_head]
    '''
    x = torch.matmul(attention, V)
    # transpose again for later concatenation -> [batch_size, ranking_size, n_heads, num_features_sub_head]
    x = x.permute(0, 2, 1, 3).contiguous()
    # e.g., x: [64, 12, 6, 50] -> [64, 12, 300], i.e., -> [batch_size, ranking_size, num_features]
    x = x.view(bsz, -1, self.n_heads * (self.hid_dim // self.n_heads))
    x = self.fc(x)
    return x
Forward pass through the multi-head attention block. :param batch_rankings: [batch_size, ranking_size, num_features] :return:
forward
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
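A minimal usage sketch of the two methods above, assembled into the MultiheadAttention module (the shapes mirror the [64, 12, 6, 50] example in the comments; torch and the class itself are assumed to be importable):

import torch

batch_rankings = torch.rand(64, 12, 300)  # [batch_size, ranking_size, num_features]
mhsa = MultiheadAttention(hid_dim=300, n_heads=6, dropout=0.1)  # 300 % 6 == 0, so each head gets 50 features
out = mhsa(batch_rankings)
print(out.shape)  # torch.Size([64, 12, 300]) -- the shape is preserved, so blocks can be stacked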
def __init__(self, num_features, hid_dim, dropout=0.1):
    """
    :param num_features: input/output dimensionality
    :param hid_dim: hidden dimensionality
    :param dropout: dropout probability
    """
    super(PositionwiseFeedForward, self).__init__()
    self.w1 = nn.Linear(num_features, hid_dim)
    self.w2 = nn.Linear(hid_dim, num_features)
    self.dropout = nn.Dropout(dropout)
:param num_features: input/output dimensionality :param hid_dim: hidden dimensionality :param dropout: dropout probability
__init__
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
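This row only includes __init__; a plausible forward for such a position-wise feed-forward block, in the AllRank style, is sketched below (an assumption, not part of this row):

def forward(self, x):
    # apply the same two-layer MLP to every position (document) independently:
    # x: [batch_size, ranking_size, num_features] -> same shape
    return self.w2(self.dropout(torch.relu(self.w1(x))))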
def ini_listsf(self, num_features=None, ff_dims=[128, 256, 512], out_dim=1, AF='R', TL_AF='GE', apply_tl_af=False,
               BN=True, bn_type=None, bn_affine=False, n_heads=2, encoder_layers=3, dropout=0.1, encoder_type=None):
    '''
    Initialization of a permutation equivariant neural network
    '''
    ''' Component-1: stacked feed-forward layers for initial encoding '''
    head_ff_dims = [num_features]
    head_ff_dims.extend(ff_dims)
    head_ff_dims.append(num_features)
    head_ffnns = get_stacked_FFNet(ff_dims=head_ff_dims, AF=AF, TL_AF=AF, apply_tl_af=True, dropout=dropout,
                                   BN=BN, bn_type=bn_type, bn_affine=bn_affine, device=self.device)
    ''' Component-2: stacked multi-head self-attention (MHSA) blocks '''
    encoder_dim = num_features
    mhsa = MultiheadAttention(hid_dim=encoder_dim, n_heads=n_heads, dropout=dropout, device=self.device)
    if 'AllRank' == encoder_type:
        fc = PositionwiseFeedForward(num_features, hid_dim=encoder_dim, dropout=dropout)
        encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_dim, mhsa=dc(mhsa), encoder_type=encoder_type,
                                             fc=fc, dropout=dropout),
                          num_layers=encoder_layers, encoder_type=encoder_type)
    elif 'DASALC' == encoder_type:  # we note that the feature normalization strategy differs from AllRank
        encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_dim, mhsa=dc(mhsa), encoder_type=encoder_type),
                          num_layers=encoder_layers, encoder_type=encoder_type)
    elif 'AttnDIN' == encoder_type:
        encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_dim, mhsa=dc(mhsa), encoder_type=encoder_type),
                          num_layers=encoder_layers, encoder_type=encoder_type)
    else:
        raise NotImplementedError
    ''' Component-3: stacked feed-forward layers for relevance prediction '''
    tail_ff_dims = [num_features]
    tail_ff_dims.extend(ff_dims)
    tail_ff_dims.append(out_dim)
    tail_ffnns = get_stacked_FFNet(ff_dims=tail_ff_dims, AF=AF, TL_AF=TL_AF, apply_tl_af=apply_tl_af,
                                   BN=BN, bn_type=bn_type, bn_affine=bn_affine, device=self.device)
    if self.gpu:
        head_ffnns = head_ffnns.to(self.device)
        encoder = encoder.to(self.device)
        tail_ffnns = tail_ffnns.to(self.device)
    list_sf = {'head_ffnns': head_ffnns, 'encoder': encoder, 'tail_ffnns': tail_ffnns}
    return list_sf
Initialization of a permutation equivariant neural network
ini_listsf
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
def forward(self, batch_q_doc_vectors):
    '''
    Forward pass through the scoring function, where the documents associated with the same query are scored jointly.
    @param batch_q_doc_vectors: [batch_size, num_docs, num_features], the latter two dimensions {num_docs, num_features} denote feature vectors associated with the same query.
    @return:
    '''
    if 'AllRank' == self.encoder_type:
        # deploy the same mapping for batch queries
        batch_FC_mappings = self.list_sf['head_ffnns'](batch_q_doc_vectors)  # -> the same shape as the output of encoder
        batch_encoder_mappings = self.list_sf['encoder'](batch_FC_mappings)
        batch_preds = self.list_sf['tail_ffnns'](batch_encoder_mappings)
    elif 'DASALC' == self.encoder_type:
        batch_FC_mappings = self.list_sf['head_ffnns'](batch_q_doc_vectors)  # -> the same shape as the output of encoder
        batch_encoder_mappings = self.list_sf['encoder'](batch_q_doc_vectors)  # the input of the encoder differs from AllRank
        latent_cross_mappings = (batch_encoder_mappings + 1.0) * batch_FC_mappings
        batch_preds = self.list_sf['tail_ffnns'](latent_cross_mappings)
    elif 'AttnDIN' == self.encoder_type:
        batch_FC_mappings = self.list_sf['head_ffnns'](batch_q_doc_vectors)  # -> the same shape as the output of encoder
        batch_encoder_mappings = self.list_sf['encoder'](batch_FC_mappings)  # the input of the encoder differs from DASALC
        concat_mappings = batch_encoder_mappings + batch_q_doc_vectors
        batch_preds = self.list_sf['tail_ffnns'](concat_mappings)
    else:
        raise NotImplementedError
    batch_pred = torch.squeeze(batch_preds, dim=2)  # [batch, num_docs, 1] -> [batch, num_docs]
    return batch_pred
Forward pass through the scoring function, where the documents associated with the same query are scored jointly. @param batch_q_doc_vectors: [batch_size, num_docs, num_features], the latter two dimensions {num_docs, num_features} denote feature vectors associated with the same query. @return:
forward
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
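The DASALC branch above combines the encoder and FC mappings via a latent cross, (encoder + 1) * FC. A tiny standalone check of that element-wise gate (the tensor values are made up):

import torch

batch_encoder_mappings = torch.tensor([[[0.0], [1.0]]])  # [batch, num_docs, d]
batch_FC_mappings = torch.tensor([[[0.5], [0.5]]])
latent_cross = (batch_encoder_mappings + 1.0) * batch_FC_mappings
print(latent_cross.squeeze())  # tensor([0.5000, 1.0000]) -- a zero encoder output passes the FC mapping through unchanged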
def ini_pointsf(self, num_features=None, h_dim=100, out_dim=1, num_layers=3, AF='R', TL_AF='S', apply_tl_af=False,
                BN=True, bn_type=None, bn_affine=False, dropout=0.1):
    '''
    Initialization of a feed-forward neural network
    '''
    ff_dims = [num_features]
    for i in range(num_layers):
        ff_dims.append(h_dim)
    ff_dims.append(out_dim)
    point_sf = get_stacked_FFNet(ff_dims=ff_dims, AF=AF, TL_AF=TL_AF, apply_tl_af=apply_tl_af, dropout=dropout,
                                 BN=BN, bn_type=bn_type, bn_affine=bn_affine, device=self.device)
    return point_sf
Initialization of a feed-forward neural network
ini_pointsf
python
wildltr/ptranking
ptranking/base/point_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/point_ranker.py
MIT
def forward(self, batch_q_doc_vectors):
    '''
    Forward pass through the scoring function, where each document is scored independently.
    @param batch_q_doc_vectors: [batch_size, num_docs, num_features], the latter two dimensions {num_docs, num_features} denote feature vectors associated with the same query.
    @return:
    '''
    batch_size, num_docs, num_features = batch_q_doc_vectors.size()
    _batch_preds = self.point_sf(batch_q_doc_vectors)
    batch_preds = _batch_preds.view(-1, num_docs)  # [batch_size x num_docs, 1] -> [batch_size, num_docs]
    return batch_preds
Forward pass through the scoring function, where each document is scored independently. @param batch_q_doc_vectors: [batch_size, num_docs, num_features], the latter two dimensions {num_docs, num_features} denote feature vectors associated with the same query. @return:
forward
python
wildltr/ptranking
ptranking/base/point_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/point_ranker.py
MIT
def ndcg_at_k(self, test_data=None, k=10, label_type=LABEL_TYPE.MultiLabel, presort=False, device='cpu'):
    '''
    Compute nDCG@k with the given data.
    An underlying assumption is that there is at least one relevant document; otherwise a ZeroDivisionError occurs.
    '''
    self.eval_mode()  # switch to evaluation mode
    num_queries = 0
    sum_ndcg_at_k = torch.zeros(1)
    for batch_ids, batch_q_doc_vectors, batch_std_labels in test_data:  # batch_size, [batch_size, num_docs, num_features], [batch_size, num_docs]
        if batch_std_labels.size(1) < k:
            continue  # skip if the number of documents is smaller than k
        else:
            num_queries += len(batch_ids)
        if self.gpu: batch_q_doc_vectors = batch_q_doc_vectors.to(self.device)
        batch_preds = self.predict(batch_q_doc_vectors)
        if self.gpu: batch_preds = batch_preds.cpu()
        _, batch_pred_desc_inds = torch.sort(batch_preds, dim=1, descending=True)
        batch_predict_rankings = torch.gather(batch_std_labels, dim=1, index=batch_pred_desc_inds)
        if presort:
            batch_ideal_rankings = batch_std_labels
        else:
            batch_ideal_rankings, _ = torch.sort(batch_std_labels, dim=1, descending=True)
        batch_ndcg_at_k = torch_ndcg_at_k(batch_predict_rankings=batch_predict_rankings,
                                          batch_ideal_rankings=batch_ideal_rankings,
                                          k=k, label_type=label_type, device=device)
        sum_ndcg_at_k += torch.sum(batch_ndcg_at_k)  # due to batch processing
    avg_ndcg_at_k = sum_ndcg_at_k / num_queries
    return avg_ndcg_at_k
Compute nDCG@k with the given data. An underlying assumption is that there is at least one relevant document; otherwise a ZeroDivisionError occurs.
ndcg_at_k
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
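For reference, a standalone sketch of the per-query quantity that torch_ndcg_at_k averages (not ptranking's implementation itself; it assumes 2^label - 1 gains, log2 discounts, and at least one relevant document):

import torch

def ndcg_at_k_sketch(predict_rankings, ideal_rankings, k):
    # both inputs: [batch, num_docs] relevance labels, re-ordered by system scores / ideally
    discounts = torch.log2(torch.arange(k, dtype=torch.float) + 2.0)  # log2(rank + 1), ranks starting at 1
    dcg = ((torch.pow(2.0, predict_rankings[:, :k]) - 1.0) / discounts).sum(dim=1)
    idcg = ((torch.pow(2.0, ideal_rankings[:, :k]) - 1.0) / discounts).sum(dim=1)
    return dcg / idcg

labels = torch.tensor([[2.0, 0.0, 1.0]])  # graded labels in system order
ideal, _ = torch.sort(labels, dim=1, descending=True)
print(ndcg_at_k_sketch(labels, ideal, k=3))  # tensor([0.9639])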
def ndcg_at_ks(self, test_data=None, ks=[1, 5, 10], label_type=LABEL_TYPE.MultiLabel, presort=False, device='cpu'):
    '''
    Compute nDCG with multiple cutoff values with the given data.
    An underlying assumption is that there is at least one relevant document; otherwise a ZeroDivisionError occurs.
    '''
    self.eval_mode()  # switch to evaluation mode
    num_queries = 0
    sum_ndcg_at_ks = torch.zeros(len(ks))
    for batch_ids, batch_q_doc_vectors, batch_std_labels in test_data:  # batch_size, [batch_size, num_docs, num_features], [batch_size, num_docs]
        if self.gpu: batch_q_doc_vectors = batch_q_doc_vectors.to(self.device)
        batch_preds = self.predict(batch_q_doc_vectors)
        if self.gpu: batch_preds = batch_preds.cpu()
        _, batch_pred_desc_inds = torch.sort(batch_preds, dim=1, descending=True)
        batch_predict_rankings = torch.gather(batch_std_labels, dim=1, index=batch_pred_desc_inds)
        if presort:
            batch_ideal_rankings = batch_std_labels
        else:
            batch_ideal_rankings, _ = torch.sort(batch_std_labels, dim=1, descending=True)
        batch_ndcg_at_ks = torch_ndcg_at_ks(batch_predict_rankings=batch_predict_rankings,
                                            batch_ideal_rankings=batch_ideal_rankings,
                                            ks=ks, label_type=label_type, device=device)
        sum_ndcg_at_ks = torch.add(sum_ndcg_at_ks, torch.sum(batch_ndcg_at_ks, dim=0))
        num_queries += len(batch_ids)
    avg_ndcg_at_ks = sum_ndcg_at_ks / num_queries
    return avg_ndcg_at_ks
Compute nDCG with multiple cutoff values with the given data. An underlying assumption is that there is at least one relevant document; otherwise a ZeroDivisionError occurs.
ndcg_at_ks
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def ap_at_k(self, test_data=None, k=10, presort=False, device='cpu'):
    '''
    Compute AP (average precision) at cutoff k with the given data
    '''
    self.eval_mode()  # switch to evaluation mode
    num_queries = 0
    sum_ap_at_k = torch.zeros(1)
    for batch_ids, batch_q_doc_vectors, batch_std_labels in test_data:  # batch_size, [batch_size, num_docs, num_features], [batch_size, num_docs]
        if batch_std_labels.size(1) < k:
            continue  # skip if the number of documents is smaller than k
        else:
            num_queries += len(batch_ids)
        if self.gpu: batch_q_doc_vectors = batch_q_doc_vectors.to(self.device)
        batch_preds = self.predict(batch_q_doc_vectors)
        if self.gpu: batch_preds = batch_preds.cpu()
        _, batch_pred_desc_inds = torch.sort(batch_preds, dim=1, descending=True)
        batch_predict_rankings = torch.gather(batch_std_labels, dim=1, index=batch_pred_desc_inds)
        if presort:
            batch_ideal_rankings = batch_std_labels
        else:
            batch_ideal_rankings, _ = torch.sort(batch_std_labels, dim=1, descending=True)
        batch_ap_at_k = torch_ap_at_k(batch_predict_rankings=batch_predict_rankings,
                                      batch_ideal_rankings=batch_ideal_rankings, k=k, device=device)
        sum_ap_at_k += torch.sum(batch_ap_at_k)  # due to batch processing
    avg_ap_at_k = sum_ap_at_k / num_queries
    return avg_ap_at_k
Compute AP (average precision) at cutoff k with the given data
ap_at_k
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
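Similarly, a standalone sketch of AP@k on binarized labels (one common convention, normalizing by the number of relevant documents within the cutoff; ptranking's torch_ap_at_k may normalize differently):

import torch

def ap_at_k_sketch(predict_rankings, k):
    rele = (predict_rankings[:, :k] > 0).float()              # binarize labels within the cutoff
    precision_at_i = torch.cumsum(rele, dim=1) / torch.arange(1.0, k + 1)
    num_rele = rele.sum(dim=1).clamp(min=1.0)                 # guard against zero relevant docs
    return (precision_at_i * rele).sum(dim=1) / num_rele      # average precision over relevant positions

print(ap_at_k_sketch(torch.tensor([[1.0, 0.0, 1.0]]), k=3))  # tensor([0.8333]): (1/1 + 2/3) / 2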
def p_at_k(self, test_data=None, k=10, device='cpu'):
    '''
    Compute precision at cutoff k with the given data
    '''
    self.eval_mode()  # switch to evaluation mode
    num_queries = 0
    sum_p_at_k = torch.zeros(1)
    for batch_ids, batch_q_doc_vectors, batch_std_labels in test_data:  # batch_size, [batch_size, num_docs, num_features], [batch_size, num_docs]
        if batch_std_labels.size(1) < k:
            continue  # skip if the number of documents is smaller than k
        else:
            num_queries += len(batch_ids)
        if self.gpu: batch_q_doc_vectors = batch_q_doc_vectors.to(self.device)
        batch_preds = self.predict(batch_q_doc_vectors)
        if self.gpu: batch_preds = batch_preds.cpu()
        _, batch_pred_desc_inds = torch.sort(batch_preds, dim=1, descending=True)
        batch_predict_rankings = torch.gather(batch_std_labels, dim=1, index=batch_pred_desc_inds)
        batch_p_at_k = torch_precision_at_k(batch_predict_rankings=batch_predict_rankings, k=k, device=device)
        sum_p_at_k += torch.sum(batch_p_at_k)  # due to batch processing
    avg_p_at_k = sum_p_at_k / num_queries
    return avg_p_at_k
Compute precision at cutoff k with the given data
p_at_k
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def adhoc_performance_at_ks(self, test_data=None, ks=[1, 5, 10], label_type=LABEL_TYPE.MultiLabel, max_label=None,
                            presort=False, device='cpu', need_per_q=False):
    '''
    Compute the performance using multiple metrics
    '''
    self.eval_mode()  # switch to evaluation mode
    num_queries = 0
    sum_ndcg_at_ks = torch.zeros(len(ks))
    sum_nerr_at_ks = torch.zeros(len(ks))
    sum_ap_at_ks = torch.zeros(len(ks))
    sum_p_at_ks = torch.zeros(len(ks))
    if need_per_q: list_per_q_p, list_per_q_ap, list_per_q_nerr, list_per_q_ndcg = [], [], [], []
    for batch_ids, batch_q_doc_vectors, batch_std_labels in test_data:  # batch_size, [batch_size, num_docs, num_features], [batch_size, num_docs]
        if self.gpu: batch_q_doc_vectors = batch_q_doc_vectors.to(self.device)
        batch_preds = self.predict(batch_q_doc_vectors)
        if self.gpu: batch_preds = batch_preds.cpu()
        _, batch_pred_desc_inds = torch.sort(batch_preds, dim=1, descending=True)
        batch_predict_rankings = torch.gather(batch_std_labels, dim=1, index=batch_pred_desc_inds)
        if presort:
            batch_ideal_rankings = batch_std_labels
        else:
            batch_ideal_rankings, _ = torch.sort(batch_std_labels, dim=1, descending=True)
        batch_ndcg_at_ks = torch_ndcg_at_ks(batch_predict_rankings=batch_predict_rankings,
                                            batch_ideal_rankings=batch_ideal_rankings,
                                            ks=ks, label_type=label_type, device=device)
        sum_ndcg_at_ks = torch.add(sum_ndcg_at_ks, torch.sum(batch_ndcg_at_ks, dim=0))
        batch_nerr_at_ks = torch_nerr_at_ks(batch_predict_rankings=batch_predict_rankings,
                                            batch_ideal_rankings=batch_ideal_rankings, max_label=max_label,
                                            ks=ks, label_type=label_type, device=device)
        sum_nerr_at_ks = torch.add(sum_nerr_at_ks, torch.sum(batch_nerr_at_ks, dim=0))
        batch_ap_at_ks = torch_ap_at_ks(batch_predict_rankings=batch_predict_rankings,
                                        batch_ideal_rankings=batch_ideal_rankings, ks=ks, device=device)
        sum_ap_at_ks = torch.add(sum_ap_at_ks, torch.sum(batch_ap_at_ks, dim=0))
        batch_p_at_ks = torch_precision_at_ks(batch_predict_rankings=batch_predict_rankings, ks=ks, device=device)
        sum_p_at_ks = torch.add(sum_p_at_ks, torch.sum(batch_p_at_ks, dim=0))
        if need_per_q:
            list_per_q_p.append(batch_p_at_ks)
            list_per_q_ap.append(batch_ap_at_ks)
            list_per_q_nerr.append(batch_nerr_at_ks)
            list_per_q_ndcg.append(batch_ndcg_at_ks)
        num_queries += len(batch_ids)
    avg_ndcg_at_ks = sum_ndcg_at_ks / num_queries
    avg_nerr_at_ks = sum_nerr_at_ks / num_queries
    avg_ap_at_ks = sum_ap_at_ks / num_queries
    avg_p_at_ks = sum_p_at_ks / num_queries
    if need_per_q:
        return avg_ndcg_at_ks, avg_nerr_at_ks, avg_ap_at_ks, avg_p_at_ks, \
               list_per_q_ndcg, list_per_q_nerr, list_per_q_ap, list_per_q_p
    else:
        return avg_ndcg_at_ks, avg_nerr_at_ks, avg_ap_at_ks, avg_p_at_ks
Compute the performance using multiple metrics
adhoc_performance_at_ks
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def alpha_ndcg_at_k(self, test_data=None, k=5, device='cpu'):
    '''
    Compute alpha-nDCG@k with the given data
    @param test_data:
    @param k:
    @return:
    '''
    self.eval_mode()
    assert test_data.presort is True
    cnt = torch.zeros(1)
    sum_alpha_nDCG_at_k = torch.zeros(1)
    for qid, q_repr, perm_docs, doc_reprs, alphaDCG, q_doc_subtopics, q_doc_rele_mat in test_data:
        if torch.sum(q_doc_rele_mat) < 1.0: continue  # since this instance provides no learning signal
        if q_doc_rele_mat.size(1) < k: continue  # skip the query if the number of associated documents is smaller than k
        if self.gpu: q_repr, doc_reprs = q_repr.to(self.device), doc_reprs.to(self.device)
        sys_rele_preds = self.div_predict(q_repr, doc_reprs)  # [1, ranking_size]
        if self.gpu: sys_rele_preds = sys_rele_preds.cpu()
        _, sys_sorted_inds = torch.sort(sys_rele_preds, dim=1, descending=True)  # [1, ranking_size]
        ''' the output result will have the same shape as index '''
        sys_q_doc_rele_mat = torch.gather(q_doc_rele_mat, dim=1,
                                          index=sys_sorted_inds.expand(q_doc_rele_mat.size(0), -1))
        ''' the alternative way for gather() '''
        #sys_q_doc_rele_mat = q_doc_rele_mat[:, torch.squeeze(sys_sorted_inds, dim=0)]
        ideal_q_doc_rele_mat = q_doc_rele_mat  # under the assumption of presort
        alpha_nDCG_at_k = torch_alpha_ndcg_at_k(sys_q_doc_rele_mat=sys_q_doc_rele_mat, k=k, alpha=0.5,
                                                device=device, ideal_q_doc_rele_mat=ideal_q_doc_rele_mat)
        sum_alpha_nDCG_at_k += alpha_nDCG_at_k  # default batch_size=1 due to testing data
        cnt += 1
    avg_alpha_nDCG_at_k = sum_alpha_nDCG_at_k / cnt
    return avg_alpha_nDCG_at_k
Compute alpha-nDCG@k with the given data @param test_data: @param k: @return:
alpha_ndcg_at_k
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
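The novelty-biased gain behind alpha-nDCG can be sketched compactly: a document's gain for a subtopic decays by (1 - alpha) each time the subtopic was already covered at an earlier rank. A standalone alpha-DCG@k follows (ptranking's torch_alpha_ndcg_at_k additionally divides by the ideal alpha-DCG):

import torch

def alpha_dcg_at_k_sketch(sorted_q_doc_rele_mat, k, alpha=0.5):
    # sorted_q_doc_rele_mat: [num_subtopics, num_docs] binary coverage, columns in ranked order
    rele = sorted_q_doc_rele_mat[:, :k]
    prior_cover = torch.cumsum(rele, dim=1) - rele  # times each subtopic was covered before this rank
    novelty_gain = (rele * torch.pow(1.0 - alpha, prior_cover)).sum(dim=0)
    discounts = torch.log2(torch.arange(k, dtype=torch.float) + 2.0)
    return (novelty_gain / discounts).sum()

# doc-1 covers both subtopics, doc-2 repeats subtopic-1, doc-3 covers subtopic-2 again
m = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
print(alpha_dcg_at_k_sketch(m, k=3))  # tensor(2.5655)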
def alpha_ndcg_at_ks(self, test_data=None, ks=[1, 5, 10], device='cpu'):
    '''
    Compute alpha-nDCG with multiple cutoff values with the given data.
    There is no check based on the assumption (say light_filtering() is called) that each test instance Q includes
    at least k documents and at least one relevant document; otherwise errors will occur.
    '''
    self.eval_mode()
    assert test_data.presort is True
    cnt = torch.zeros(1)
    sum_alpha_nDCG_at_ks = torch.zeros(len(ks))
    for qid, q_repr, perm_docs, doc_reprs, alphaDCG, q_doc_subtopics, q_doc_rele_mat in test_data:
        if torch.sum(q_doc_rele_mat) < 1.0: continue
        if self.gpu: q_repr, doc_reprs = q_repr.to(self.device), doc_reprs.to(self.device)
        sys_rele_preds = self.div_predict(q_repr, doc_reprs)
        if self.gpu: sys_rele_preds = sys_rele_preds.cpu()
        _, sys_sorted_inds = torch.sort(sys_rele_preds, dim=1, descending=True)
        sys_q_doc_rele_mat = torch.gather(q_doc_rele_mat, dim=1,
                                          index=sys_sorted_inds.expand(q_doc_rele_mat.size(0), -1))
        ideal_q_doc_rele_mat = q_doc_rele_mat  # under the assumption of presort
        alpha_nDCG_at_ks = torch_alpha_ndcg_at_ks(sys_q_doc_rele_mat=sys_q_doc_rele_mat, ks=ks, alpha=0.5,
                                                  ideal_q_doc_rele_mat=ideal_q_doc_rele_mat, device=device)
        sum_alpha_nDCG_at_ks = torch.add(sum_alpha_nDCG_at_ks, torch.squeeze(alpha_nDCG_at_ks, dim=0))
        cnt += 1
    avg_alpha_nDCG_at_ks = sum_alpha_nDCG_at_ks / cnt
    return avg_alpha_nDCG_at_ks
Compute alpha-nDCG with multiple cutoff values with the given data. There is no check based on the assumption (say light_filtering() is called) that each test instance Q includes at least k documents and at least one relevant document; otherwise errors will occur.
alpha_ndcg_at_ks
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def err_ia_at_k(self, test_data=None, k=5, max_label=None, device='cpu'):
    '''
    Compute ERR-IA@k with the given data
    @param test_data:
    @param k:
    @return:
    '''
    self.eval_mode()
    cnt = torch.zeros(1)
    sum_err_ia_at_k = torch.zeros(1)
    for qid, q_repr, perm_docs, doc_reprs, alphaDCG, q_doc_subtopics, q_doc_rele_mat in test_data:
        if torch.sum(q_doc_rele_mat) < 1.0: continue  # since this instance provides no learning signal
        if q_doc_rele_mat.size(1) < k: continue  # skip the query if the number of associated documents is smaller than k
        if self.gpu: q_repr, doc_reprs = q_repr.to(self.device), doc_reprs.to(self.device)
        sys_rele_preds = self.div_predict(q_repr, doc_reprs)  # [1, ranking_size]
        if self.gpu: sys_rele_preds = sys_rele_preds.cpu()
        _, sys_sorted_inds = torch.sort(sys_rele_preds, dim=1, descending=True)  # [1, ranking_size]
        ''' the output result will have the same shape as index '''
        sys_q_doc_rele_mat = torch.gather(q_doc_rele_mat, dim=1,
                                          index=sys_sorted_inds.expand(q_doc_rele_mat.size(0), -1))
        ''' the alternative way for gather() '''
        #sys_q_doc_rele_mat = q_doc_rele_mat[:, torch.squeeze(sys_sorted_inds, dim=0)]
        err_ia_at_k = torch_err_ia_at_k(sorted_q_doc_rele_mat=sys_q_doc_rele_mat, max_label=max_label, k=k, device=device)
        sum_err_ia_at_k += err_ia_at_k  # default batch_size=1 due to testing data
        cnt += 1
    avg_err_ia_at_k = sum_err_ia_at_k / cnt
    return avg_err_ia_at_k
Compute ERR-IA@k with the given data @param test_data: @param k: @return:
err_ia_at_k
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def nerr_ia_at_k(self, test_data=None, k=5, max_label=None, device='cpu'):
    '''
    Compute nERR-IA@k with the given data
    @param test_data:
    @param k:
    @return:
    '''
    self.eval_mode()
    assert test_data.presort is True
    cnt = torch.zeros(1)
    sum_nerr_ia_at_k = torch.zeros(1)
    for qid, q_repr, perm_docs, doc_reprs, alphaDCG, q_doc_subtopics, q_doc_rele_mat in test_data:
        if torch.sum(q_doc_rele_mat) < 1.0: continue  # since this instance provides no learning signal
        if q_doc_rele_mat.size(1) < k: continue  # skip the query if the number of associated documents is smaller than k
        if self.gpu: q_repr, doc_reprs = q_repr.to(self.device), doc_reprs.to(self.device)
        sys_rele_preds = self.div_predict(q_repr, doc_reprs)  # [1, ranking_size]
        if self.gpu: sys_rele_preds = sys_rele_preds.cpu()
        _, sys_sorted_inds = torch.sort(sys_rele_preds, dim=1, descending=True)  # [1, ranking_size]
        ''' the output result will have the same shape as index '''
        sys_q_doc_rele_mat = torch.gather(q_doc_rele_mat, dim=1,
                                          index=sys_sorted_inds.expand(q_doc_rele_mat.size(0), -1))
        ''' the alternative way for gather() '''
        #sys_q_doc_rele_mat = q_doc_rele_mat[:, torch.squeeze(sys_sorted_inds, dim=0)]
        ideal_q_doc_rele_mat = q_doc_rele_mat  # under the assumption of presort
        nerr_ia_at_k = torch_nerr_ia_at_k(sys_q_doc_rele_mat=sys_q_doc_rele_mat, max_label=max_label,
                                          ideal_q_doc_rele_mat=ideal_q_doc_rele_mat, k=k, device=device)
        sum_nerr_ia_at_k += nerr_ia_at_k  # default batch_size=1 due to testing data
        cnt += 1
    avg_nerr_ia_at_k = sum_nerr_ia_at_k / cnt
    return avg_nerr_ia_at_k
Compute nERR-IA@k with the given data @param test_data: @param k: @return:
nerr_ia_at_k
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def srd_performance_at_ks(self, test_data=None, ks=[1, 5, 10], max_label=None, device='cpu',
                          generate_div_run=False, dir=None, fold_k=None, need_per_q_andcg=False):
    '''
    Compute the performance using multiple metrics
    '''
    self.eval_mode()  # switch to evaluation mode
    assert test_data.presort is True
    num_queries = 0
    sum_andcg_at_ks = torch.zeros(len(ks), device=device)
    sum_err_ia_at_ks = torch.zeros(len(ks), device=device)
    sum_nerr_ia_at_ks = torch.zeros(len(ks), device=device)
    if need_per_q_andcg: list_per_q_andcg = []
    if generate_div_run: fold_run = open(dir + '/fold_run.txt', 'w')
    for qid, q_repr, perm_docs, doc_reprs, alphaDCG, q_doc_subtopics, q_doc_rele_mat in test_data:
        if not torch.sum(q_doc_rele_mat) > 0: continue  # skip the case of no positive labels
        if self.gpu: q_repr, doc_reprs = q_repr.to(self.device), doc_reprs.to(self.device)
        sys_rele_preds = self.div_predict(q_repr, doc_reprs)
        if self.gpu: sys_rele_preds = sys_rele_preds.cpu()
        _, sys_sorted_inds = torch.sort(sys_rele_preds, dim=1, descending=True)
        if generate_div_run:
            np_sys_sorted_inds = torch.squeeze(sys_sorted_inds).data.numpy()
            num_docs = len(perm_docs)
            for i in range(num_docs):
                doc = perm_docs[np_sys_sorted_inds[i]]
                fold_run.write(' '.join([str(qid), 'Q0', doc, str(i + 1), str(num_docs - i), 'Fold' + str(fold_k) + "\n"]))
            fold_run.flush()
        sys_q_doc_rele_mat = torch.gather(q_doc_rele_mat, dim=1,
                                          index=sys_sorted_inds.expand(q_doc_rele_mat.size(0), -1))
        ideal_q_doc_rele_mat = q_doc_rele_mat  # under the assumption of presort
        andcg_at_ks = torch_alpha_ndcg_at_ks(sys_q_doc_rele_mat=sys_q_doc_rele_mat, ks=ks, alpha=0.5, device=device,
                                             ideal_q_doc_rele_mat=ideal_q_doc_rele_mat)
        err_ia_at_ks = torch_err_ia_at_ks(sorted_q_doc_rele_mat=sys_q_doc_rele_mat, max_label=max_label,
                                          ks=ks, device=device)
        nerr_ia_at_ks = torch_nerr_ia_at_ks(sys_q_doc_rele_mat=sys_q_doc_rele_mat,
                                            ideal_q_doc_rele_mat=ideal_q_doc_rele_mat,
                                            max_label=max_label, ks=ks, device=device)
        if need_per_q_andcg: list_per_q_andcg.append(andcg_at_ks)
        sum_andcg_at_ks = torch.add(sum_andcg_at_ks, torch.squeeze(andcg_at_ks, dim=0))
        sum_err_ia_at_ks = torch.add(sum_err_ia_at_ks, err_ia_at_ks)
        sum_nerr_ia_at_ks = torch.add(sum_nerr_ia_at_ks, nerr_ia_at_ks)
        num_queries += 1
    if generate_div_run:
        fold_run.flush()
        fold_run.close()
    avg_andcg_at_ks = sum_andcg_at_ks / num_queries
    avg_err_ia_at_ks = sum_err_ia_at_ks / num_queries
    avg_nerr_ia_at_ks = sum_nerr_ia_at_ks / num_queries
    if need_per_q_andcg:
        return avg_andcg_at_ks, avg_err_ia_at_ks, avg_nerr_ia_at_ks, list_per_q_andcg
    else:
        return avg_andcg_at_ks, avg_err_ia_at_ks, avg_nerr_ia_at_ks
Compute the performance using multiple metrics
srd_performance_at_ks
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def stop_training(self, batch_preds):
    '''
    Stop training if the predictions are all zeros or include nan value(s)
    '''
    # todo-as-note: 'batch_preds.byte().any()' seems a wrong operation w.r.t. gpu
    if torch.nonzero(batch_preds, as_tuple=False).size(0) <= 0:  # as_tuple=False avoids the deprecation UserWarning of nonzero
        print('All zero error.\n')
        return True
    if torch.isnan(batch_preds).any():
        print('Including NaN error.')
        return True
    return False
Stop training if the predictions are all zeros or include nan value(s)
stop_training
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def train(self, train_data, epoch_k=None, **kwargs):
    '''
    One epoch training using the entire training data
    '''
    self.train_mode()
    assert 'label_type' in kwargs and 'presort' in kwargs
    label_type, presort = kwargs['label_type'], kwargs['presort']
    num_queries = 0
    epoch_loss = torch.tensor([0.0], device=self.device)
    for batch_ids, batch_q_doc_vectors, batch_std_labels in train_data:  # batch_size, [batch_size, num_docs, num_features], [batch_size, num_docs]
        num_queries += len(batch_ids)
        if self.gpu:
            batch_q_doc_vectors, batch_std_labels = batch_q_doc_vectors.to(self.device), batch_std_labels.to(self.device)
        batch_loss, stop_training = self.train_op(batch_q_doc_vectors, batch_std_labels, batch_ids=batch_ids,
                                                  epoch_k=epoch_k, presort=presort, label_type=label_type)
        if stop_training:
            break
        else:
            epoch_loss += batch_loss.item()
    epoch_loss = epoch_loss / num_queries
    return epoch_loss, stop_training
One epoch training using the entire training data
train
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def train_op(self, batch_q_doc_vectors, batch_std_labels, **kwargs):
    '''
    The training operation over a batch of queries.
    @param batch_q_doc_vectors: [batch_size, num_docs, num_features], the latter two dimensions {num_docs, num_features} denote feature vectors associated with the same query.
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance labels for documents associated with the same query.
    @param kwargs: optional arguments
    @return:
    '''
    stop_training = False
    batch_preds = self.forward(batch_q_doc_vectors)
    if 'epoch_k' in kwargs and kwargs['epoch_k'] % self.stop_check_freq == 0:
        stop_training = self.stop_training(batch_preds)
    return self.custom_loss_function(batch_preds, batch_std_labels, **kwargs), stop_training
The training operation over a batch of queries. @param batch_q_doc_vectors: [batch_size, num_docs, num_features], the latter two dimensions {num_docs, num_features} denote feature vectors associated with the same query. @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance labels for documents associated with the same query. @param kwargs: optional arguments @return:
train_op
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def div_train(self, train_data, epoch_k=None):
    '''
    One epoch training using the entire training data
    '''
    self.train_mode()
    presort = train_data.presort
    epoch_loss = torch.tensor([0.0], device=self.device)
    for qid, q_repr, perm_docs, doc_reprs, alphaDCG, q_doc_subtopics, q_doc_rele_mat in train_data:
        if torch.sum(q_doc_rele_mat) < 1.0: continue  # skip instances that provide no training signal
        if self.gpu:
            q_repr, doc_reprs, q_doc_rele_mat = \
                q_repr.to(self.device), doc_reprs.to(self.device), q_doc_rele_mat.to(self.device)
        batch_loss, stop_training = self.div_train_op(q_repr, doc_reprs, q_doc_rele_mat, qid=qid,
                                                      alphaDCG=alphaDCG, epoch_k=epoch_k, presort=presort)
        if stop_training:
            break
        else:
            epoch_loss += batch_loss.item()
    num_queries = len(train_data)  # renamed from 'len' to avoid shadowing the builtin
    epoch_loss = epoch_loss / num_queries
    return epoch_loss, stop_training
One epoch training using the entire training data
div_train
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def div_train_op(self, q_repr, doc_reprs, q_doc_rele_mat, **kwargs):
    '''
    Per-query training based on the documents that are associated with the same query.
    '''
    stop_training = False
    batch_pred = self.div_forward(q_repr, doc_reprs)
    if 'epoch_k' in kwargs and kwargs['epoch_k'] % self.stop_check_freq == 0:
        stop_training = self.stop_training(batch_pred)
    return self.div_custom_loss_function(batch_pred, q_doc_rele_mat, **kwargs), stop_training
Per-query training based on the documents that are associated with the same query.
div_train_op
python
wildltr/ptranking
ptranking/base/ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/ranker.py
MIT
def forward(ctx, input, sigma=1.0):
    '''
    :param ctx:
    :param input: the input tensor
    :param sigma: the scaling constant
    :return:
    '''
    x = input if 1.0 == sigma else sigma * input
    sigmoid_x = 1. / (1. + torch.exp(-x))
    grad = sigmoid_x * (1. - sigmoid_x) if 1.0 == sigma else sigma * sigmoid_x * (1. - sigmoid_x)
    ctx.save_for_backward(grad)
    return sigmoid_x
:param ctx: :param input: the input tensor :param sigma: the scaling constant :return:
forward
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
def backward(ctx, grad_output):
    '''
    :param ctx:
    :param grad_output: backpropagated gradients from upper module(s)
    :return:
    '''
    grad = ctx.saved_tensors[0]
    bg = grad_output * grad  # chain rule
    return bg, None
:param ctx: :param grad_output: backpropagated gradients from upper module(s) :return:
backward
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
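A sketch of how this forward/backward pair plugs into torch.autograd.Function (the class name is hypothetical; the bodies mirror the two rows above), plus a numerical check of the hand-coded gradient:

import torch

class CustomSigmoid(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, sigma=1.0):
        x = input if 1.0 == sigma else sigma * input
        sigmoid_x = 1. / (1. + torch.exp(-x))
        grad = sigmoid_x * (1. - sigmoid_x) if 1.0 == sigma else sigma * sigmoid_x * (1. - sigmoid_x)
        ctx.save_for_backward(grad)
        return sigmoid_x

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ctx.saved_tensors[0], None  # None: no gradient w.r.t. sigma

x = torch.randn(5, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(lambda t: CustomSigmoid.apply(t, 2.0), (x,)))  # True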
def forward(ctx, input, sigma=1.0, device='cpu'):
    '''
    :param ctx:
    :param input: the input tensor
    :param sigma: the scaling constant
    :return:
    '''
    x = input if 1.0 == sigma else sigma * input
    # evaluate the sigmoid separately on the positive and negative halves for numerical stability
    torch_half = torch.tensor([0.5], dtype=torch.float, device=device)
    sigmoid_x_pos = torch.where(input > 0, 1. / (1. + torch.exp(-x)), torch_half)
    exp_x = torch.exp(x)
    sigmoid_x = torch.where(input < 0, exp_x / (1. + exp_x), sigmoid_x_pos)
    grad = sigmoid_x * (1. - sigmoid_x) if 1.0 == sigma else sigma * sigmoid_x * (1. - sigmoid_x)
    ctx.save_for_backward(grad)
    return sigmoid_x
:param ctx: :param input: the input tensor :param sigma: the scaling constant :return:
forward
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
def backward(ctx, grad_output):
    '''
    :param ctx:
    :param grad_output: backpropagated gradients from upper module(s)
    :return:
    '''
    grad = ctx.saved_tensors[0]
    bg = grad_output * grad  # chain rule
    return bg, None, None
:param ctx: :param grad_output: backpropagated gradients from upper module(s) :return:
backward
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
def __init__(self, num_features, momentum=0.1, affine=True, track_running_stats=False):
    '''
    @param num_features: C from an expected input of size (N, C, L) or from input of size (N, L)
    @param momentum: the value used for the running_mean and running_var computation. Can be set to None for cumulative moving average (i.e. simple average). Default: 0.1
    @param affine: a boolean value that when set to True, this module has learnable affine parameters. Default: True
    @param track_running_stats:
    '''
    super(LTRBatchNorm, self).__init__()
    self.bn = nn.BatchNorm1d(num_features, momentum=momentum, affine=affine,
                             track_running_stats=track_running_stats)
@param num_features: C from an expected input of size (N, C, L) or from input of size (N, L) @param momentum: the value used for the running_mean and running_var computation. Can be set to None for cumulative moving average (i.e. simple average). Default: 0.1 @param affine: a boolean value that when set to True, this module has learnable affine parameters. Default: True @param track_running_stats:
__init__
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
def __init__(self, num_features, momentum=0.1, affine=True, device=None):
    '''
    In the context of learning-to-rank, batch normalization is conducted at a per-query level, namely across documents associated with the same query.
    @param num_features: the number of features
    @param num_dims: the input is assumed to be [num_queries, num_docs, num_features]
    '''
    super().__init__()
    shape = (1, 1, num_features)
    # The scale parameter and the shift parameter (model parameters) are initialized to 1 and 0, respectively
    self.gamma = nn.Parameter(torch.ones(shape, device=device))
    self.beta = nn.Parameter(torch.zeros(shape, device=device))
    # The variables that are not model parameters are initialized to 0 and 1
    self.moving_mean = torch.zeros(shape, device=device)
    self.moving_var = torch.ones(shape, device=device)
    self.momentum = momentum
    self.affine = affine
    if self.affine:
        self.weight = nn.Parameter(torch.ones(shape, device=device))
        self.bias = nn.Parameter(torch.zeros(shape, device=device))
In the context of learning-to-rank, batch normalization is conducted at a per-query level, namely across documents associated with the same query. @param num_features: the number of features @param num_dims: is assumed to be [num_queries, num_docs, num_features]
__init__
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
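The forward pass of this module is not included in this dump; a minimal sketch of the normalization step such an __init__ suggests is below (an assumption: per-feature statistics over the query and document dimensions, with moving statistics updated in training mode):

import torch

def ltr_batch_norm2_sketch(X, gamma, beta, moving_mean, moving_var, momentum=0.1, eps=1e-5, training=True):
    # X: [num_queries, num_docs, num_features]; gamma/beta/moving stats: (1, 1, num_features)
    if training:
        mean = X.mean(dim=(0, 1), keepdim=True)
        var = ((X - mean) ** 2).mean(dim=(0, 1), keepdim=True)
        moving_mean = (1.0 - momentum) * moving_mean + momentum * mean  # update running statistics
        moving_var = (1.0 - momentum) * moving_var + momentum * var
    else:
        mean, var = moving_mean, moving_var  # use running statistics at inference time
    X_hat = (X - mean) / torch.sqrt(var + eps)
    return gamma * X_hat + beta, moving_mean, moving_var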
def get_stacked_FFNet(ff_dims=None, AF=None, TL_AF=None, apply_tl_af=False, dropout=0.1,
                      BN=True, bn_type=None, bn_affine=False, device='cpu', split_penultimate_layer=False):
    '''
    Generate one stacked feed-forward network.
    '''
    # '2' refers to the simplest case: num_features, out_dim
    assert ff_dims is not None and len(ff_dims) >= 2
    ff_net = nn.Sequential()
    num_layers = len(ff_dims)
    if num_layers > 2:
        for i in range(1, num_layers - 1):
            prior_dim, ff_i_dim = ff_dims[i - 1], ff_dims[i]
            ff_net.add_module('_'.join(['dr', str(i)]), nn.Dropout(dropout))
            nr_hi = nn.Linear(prior_dim, ff_i_dim)
            nr_init(nr_hi.weight)
            ff_net.add_module('_'.join(['ff', str(i + 1)]), nr_hi)
            if BN:  # before applying activation
                if 'BN' == bn_type:
                    bn_i = LTRBatchNorm(ff_i_dim, momentum=0.1, affine=bn_affine, track_running_stats=False)
                elif 'BN2' == bn_type:
                    bn_i = LTRBatchNorm2(ff_i_dim, momentum=0.1, affine=bn_affine, device=device)
                else:
                    raise NotImplementedError
                ff_net.add_module('_'.join(['bn', str(i + 1)]), bn_i)
            ff_net.add_module('_'.join(['act', str(i + 1)]), get_AF(AF))
    # last layer
    penultimate_dim, out_dim = ff_dims[-2], ff_dims[-1]
    nr_hn = nn.Linear(penultimate_dim, out_dim)
    nr_init(nr_hn.weight)
    if split_penultimate_layer:
        tail_net = nn.Sequential()
        tail_net.add_module('_'.join(['ff', str(num_layers)]), nr_hn)
        if apply_tl_af:
            if BN:  # before applying activation
                if 'BN' == bn_type:
                    tail_net.add_module('_'.join(['bn', str(num_layers)]),
                                        LTRBatchNorm(out_dim, momentum=0.1, affine=bn_affine, track_running_stats=False))
                elif 'BN2' == bn_type:
                    tail_net.add_module('_'.join(['bn', str(num_layers)]),
                                        LTRBatchNorm2(out_dim, momentum=0.1, affine=bn_affine, device=device))
                else:
                    raise NotImplementedError
            tail_net.add_module('_'.join(['act', str(num_layers)]), get_AF(TL_AF))
        return ff_net, tail_net
    else:
        ff_net.add_module('_'.join(['ff', str(num_layers)]), nr_hn)
        if apply_tl_af:
            if BN:  # before applying activation
                if 'BN' == bn_type:
                    ff_net.add_module('_'.join(['bn', str(num_layers)]),
                                      LTRBatchNorm(out_dim, momentum=0.1, affine=bn_affine, track_running_stats=False))
                elif 'BN2' == bn_type:
                    ff_net.add_module('_'.join(['bn', str(num_layers)]),
                                      LTRBatchNorm2(out_dim, momentum=0.1, affine=bn_affine, device=device))
                else:
                    raise NotImplementedError
            ff_net.add_module('_'.join(['act', str(num_layers)]), get_AF(TL_AF))
        return ff_net
Generate one stacked feed-forward network.
get_stacked_FFNet
python
wildltr/ptranking
ptranking/base/utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/utils.py
MIT
def get_data_meta(data_id=None):
    """
    Get the meta-information corresponding to the specified dataset
    """
    if data_id in MSLRWEB:
        max_rele_level = 4
        label_type = LABEL_TYPE.MultiLabel
        num_features = 136
        has_comment = False
        fold_num = 5
    elif data_id in MSLETOR_SUPER:
        max_rele_level = 2
        label_type = LABEL_TYPE.MultiLabel
        num_features = 46
        has_comment = True
        fold_num = 5
    elif data_id in MSLETOR_SEMI:
        max_rele_level = 2
        label_type = LABEL_TYPE.MultiLabel
        num_features = 46
        has_comment = True
        fold_num = 5
    elif data_id in MSLETOR_LIST:
        max_rele_level = None
        label_type = LABEL_TYPE.Permutation
        num_features = 46
        has_comment = True
        fold_num = 5
    elif data_id in YAHOO_LTR:
        max_rele_level = 4
        label_type = LABEL_TYPE.MultiLabel
        num_features = 700  # libsvm format, rather than a uniform number
        has_comment = False
        fold_num = 1
    elif data_id in YAHOO_LTR_5Fold:
        max_rele_level = 4
        label_type = LABEL_TYPE.MultiLabel
        num_features = 700  # libsvm format, rather than a uniform number
        has_comment = False
        fold_num = 5
    elif data_id in ISTELLA_LTR:
        max_rele_level = 4
        label_type = LABEL_TYPE.MultiLabel
        num_features = 220  # libsvm format, rather than a uniform number
        fold_num = 1
        if data_id in ['Istella_S', 'Istella']:
            has_comment = False
        else:
            has_comment = True
    else:
        raise NotImplementedError
    data_meta = dict(num_features=num_features, has_comment=has_comment, label_type=label_type,
                     max_rele_level=max_rele_level, fold_num=fold_num)
    return data_meta
Get the meta-information corresponding to the specified dataset
get_data_meta
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def get_scaler_setting(data_id, scaler_id=None):
    """
    A default scaler-setting for loading a dataset
    :param data_id:
    :param scaler_id:
    :return:
    """
    '''
    According to {Introducing {LETOR} 4.0 Datasets}, "QueryLevelNorm version: Conduct query level normalization based on
    data in MIN version. This data can be directly used for learning. We further provide 5 fold partitions of this version
    for cross fold validation".
    --> Thus there is no need to perform query_level_scale again for {MQ2007_super | MQ2008_super | MQ2007_semi | MQ2008_semi}
    --> But for {MSLRWEB10K | MSLRWEB30K}, the query-level normalization is ## not conducted yet ##.
    --> For {Yahoo_LTR_Set_1 | Yahoo_LTR_Set_2}, the query-level normalization is already conducted.
    --> For Istella! LETOR, the query-level normalization is not conducted yet. We note that ISTELLA contains extremely
    large features, e.g., 1.79769313486e+308, and we replace features of this kind with a constant 1000000.
    '''
    if scaler_id is None:
        if data_id in MSLRWEB or data_id in ISTELLA_LTR:
            scale_data = True
            scaler_id = 'StandardScaler'  # ['MinMaxScaler', 'StandardScaler']
            scaler_level = 'QUERY'  # SCALER_LEVEL = ['QUERY', 'DATASET']
        else:
            scale_data = False
            scaler_id = None
            scaler_level = None
    else:
        scale_data = True
        scaler_level = 'QUERY'
    return scale_data, scaler_id, scaler_level
A default scaler-setting for loading a dataset :param data_id: :param scaler_id: :return:
get_scaler_setting
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def iter_lines(lines, has_targets=True, one_indexed=True, missing=0.0, has_comment=False):
    """
    Transforms an iterator of lines to an iterator of LETOR rows. Each row is represented by a (x, y, qid, comment) tuple.

    Parameters
    ----------
    lines : iterable of lines
        Lines to parse.
    has_targets : bool, optional, i.e., the relevance label
        Whether the file contains targets. If True, will expect the first token of every line to be a real representing
        the sample's target (i.e. score). If False, will use -1 as a placeholder for all targets.
    one_indexed : bool, optional, i.e., whether the index of the first feature is 1
        Whether feature ids are one-indexed. If True, will subtract 1 from each feature id.
    missing : float, optional
        Placeholder to use if a feature value is not provided for a sample.

    Yields
    ------
    x : array of floats
        Feature vector of the sample.
    y : float
        Target value (score) of the sample, or -1 if no target was parsed.
    qid : object
        Query id of the sample. This is currently guaranteed to be a string.
    comment : str
        Comment accompanying the sample.
    """
    for line in lines:
        if has_comment:
            data, _, comment = line.rstrip().partition('#')
            toks = data.split()
        else:
            toks = line.rstrip().split()
        num_features = 0
        feature_vec = np.repeat(missing, 8)
        std_score = -1.0
        if has_targets:
            std_score = float(toks[0])
            toks = toks[1:]
        qid = _parse_qid_tok(toks[0])
        for tok in toks[1:]:
            fid, _, val = tok.partition(':')
            fid = int(fid)
            val = float(val)
            if one_indexed:
                fid -= 1
            assert fid >= 0
            while len(feature_vec) <= fid:
                orig = len(feature_vec)
                feature_vec.resize(len(feature_vec) * 2)
                feature_vec[orig:orig * 2] = missing
            feature_vec[fid] = val
            num_features = max(fid + 1, num_features)
        assert num_features > 0
        feature_vec.resize(num_features)
        if has_comment:
            yield (feature_vec, std_score, qid, comment)
        else:
            yield (feature_vec, std_score, qid)
Transforms an iterator of lines to an iterator of LETOR rows. Each row is represented by a (x, y, qid, comment) tuple.

Parameters
----------
lines : iterable of lines
    Lines to parse.
has_targets : bool, optional, i.e., the relevance label
    Whether the file contains targets. If True, will expect the first token of every line to be a real representing
    the sample's target (i.e. score). If False, will use -1 as a placeholder for all targets.
one_indexed : bool, optional, i.e., whether the index of the first feature is 1
    Whether feature ids are one-indexed. If True, will subtract 1 from each feature id.
missing : float, optional
    Placeholder to use if a feature value is not provided for a sample.

Yields
------
x : array of floats
    Feature vector of the sample.
y : float
    Target value (score) of the sample, or -1 if no target was parsed.
qid : object
    Query id of the sample. This is currently guaranteed to be a string.
comment : str
    Comment accompanying the sample.
iter_lines
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
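A quick usage sketch of iter_lines on two toy LETOR lines (assuming numpy as np is imported and a _parse_qid_tok helper that strips the 'qid:' prefix, as the function expects):

lines = [
    '2 qid:10 1:0.5 3:1.0 # docid = G1-D2',
    '0 qid:10 1:0.1 2:0.3 # docid = G1-D7',
]
for x, y, qid, comment in iter_lines(lines, has_comment=True):
    print(qid, y, x, comment.strip())
# 10 2.0 [0.5 0.  1. ] docid = G1-D2   <- feature 3 maps to index 2 (one-indexed input)
# 10 0.0 [0.1 0.3] docid = G1-D7       <- rows can differ in width; parse_letor pads them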
def parse_letor(source, has_targets=True, one_indexed=True, missing=0.0, has_comment=False):
    """
    Parses a LETOR dataset from `source`.

    Parameters
    ----------
    source : string or iterable of lines
        String, file, or other file-like object to parse.
    has_targets : bool, optional
    one_indexed : bool, optional
    missing : float, optional

    Returns
    -------
    X : array of arrays of floats
        Feature matrix (see `iter_lines`).
    y : array of floats
        Target vector (see `iter_lines`).
    qids : array of objects
        Query id vector (see `iter_lines`).
    comments : array of strs
        Comment vector (see `iter_lines`).
    """
    max_width = 0
    feature_vecs, std_scores, qids = [], [], []
    if has_comment:
        comments = []
    it = iter_lines(source, has_targets=has_targets, one_indexed=one_indexed, missing=missing, has_comment=has_comment)
    if has_comment:
        for f_vec, s, qid, comment in it:
            feature_vecs.append(f_vec)
            std_scores.append(s)
            qids.append(qid)
            comments.append(comment)
            max_width = max(max_width, len(f_vec))
    else:
        for f_vec, s, qid in it:
            feature_vecs.append(f_vec)
            std_scores.append(s)
            qids.append(qid)
            max_width = max(max_width, len(f_vec))
    assert max_width > 0
    all_features_mat = np.ndarray((len(feature_vecs), max_width), dtype=np.float64)
    all_features_mat.fill(missing)
    for i, x in enumerate(feature_vecs):
        all_features_mat[i, :len(x)] = x
    all_labels_vec = np.array(std_scores)
    if has_comment:
        docids = [_parse_docid(comment) for comment in comments]
        # features, std_scores, qids, docids
        return all_features_mat, all_labels_vec, qids, docids
    else:
        # features, std_scores, qids
        return all_features_mat, all_labels_vec, qids
Parses a LETOR dataset from `source`.

Parameters
----------
source : string or iterable of lines
    String, file, or other file-like object to parse.
has_targets : bool, optional
one_indexed : bool, optional
missing : float, optional

Returns
-------
X : array of arrays of floats
    Feature matrix (see `iter_lines`).
y : array of floats
    Target vector (see `iter_lines`).
qids : array of objects
    Query id vector (see `iter_lines`).
comments : array of strs
    Comment vector (see `iter_lines`).
parse_letor
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def clip_query_data(qid, list_docids=None, feature_mat=None, std_label_vec=None, binary_rele=False,
                    unknown_as_zero=False, clip_query=None, min_docs=None, min_rele=1, presort=None):
    """
    Clip the data associated with the same query if required
    """
    if binary_rele: std_label_vec = np.clip(std_label_vec, a_min=-10, a_max=1)  # to binary labels
    if unknown_as_zero: std_label_vec = np.clip(std_label_vec, a_min=0, a_max=10)  # convert unknown as zero
    if clip_query:
        if feature_mat.shape[0] < min_docs:  # skip queries whose documents are fewer than the pre-specified min_docs
            return None
        if (std_label_vec > 0).sum() < min_rele:
            # skip queries with no standard relevant documents, since they are meaningless for both training and testing
            return None
    assert presort is not None
    if presort:
        '''
        Possible advantages: 1> saving time for evaluation; 2> saving time for some models, say the ones that need optimal ranking
        '''
        des_inds = np_arg_shuffle_ties(std_label_vec, descending=True)  # sampling by shuffling ties
        feature_mat, std_label_vec = feature_mat[des_inds], std_label_vec[des_inds]
        '''
        if list_docids is None:
            list_docids = None
        else:
            list_docids = []
            for ind in des_inds:
                list_docids.append(list_docids[ind])
        '''
    return (qid, feature_mat, std_label_vec)
Clip the data associated with the same query if required
clip_query_data
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def iter_queries(in_file, presort=None, data_dict=None, scale_data=None, scaler_id=None, perquery_file=None, buffer=True):
    '''
    Transforms an iterator of rows to an iterator of queries (i.e., a unit of all the documents and labels associated with the same query).
    Each query is represented by a (qid, feature_mat, std_label_vec) tuple.
    :param in_file:
    :param has_comment:
    :param query_level_scale: perform query-level scaling, say normalization
    :param scaler: MinMaxScaler | RobustScaler
    :param unknown_as_zero: if not labeled, regard the relevance degree as zero
    :return:
    '''
    assert presort is not None
    if os.path.exists(perquery_file): return pickle_load(perquery_file)
    if scale_data: scaler = get_scaler(scaler_id=scaler_id)
    min_docs, min_rele = data_dict['min_docs'], data_dict['min_rele']
    unknown_as_zero, binary_rele, has_comment = data_dict['unknown_as_zero'], data_dict['binary_rele'], data_dict['has_comment']
    clip_query = False
    if min_rele is not None and min_rele > 0:
        clip_query = True
    if min_docs is not None and min_docs > 0:
        clip_query = True
    list_Qs = []
    print(in_file)
    with open(in_file, encoding='iso-8859-1') as file_obj:
        dict_data = dict()
        if has_comment:
            all_features_mat, all_labels_vec, qids, docids = parse_letor(file_obj.readlines(), has_comment=True)
            for i in range(len(qids)):
                f_vec = all_features_mat[i, :]
                std_s = all_labels_vec[i]
                qid = qids[i]
                docid = docids[i]
                if qid in dict_data:
                    dict_data[qid].append((std_s, docid, f_vec))
                else:
                    dict_data[qid] = [(std_s, docid, f_vec)]
            del all_features_mat
            # sequential unique qids
            seen = set()
            seen_add = seen.add
            qids_unique = [x for x in qids if not (x in seen or seen_add(x))]
            for qid in qids_unique:
                tmp = list(zip(*dict_data[qid]))
                list_labels_per_q = tmp[0]
                if data_dict['data_id'] in MSLETOR_LIST:
                    ''' convert the original rank-position into grade-labels '''
                    ranking_size = len(list_labels_per_q)
                    list_labels_per_q = [ranking_size - r for r in list_labels_per_q]
                #list_docids_per_q = tmp[1]
                list_features_per_q = tmp[2]
                feature_mat = np.vstack(list_features_per_q)
                if scale_data:
                    if data_dict['data_id'] in ISTELLA_LTR:
                        # due to the possible extremely large features, e.g., 1.79769313486e+308
                        feature_mat = scaler.fit_transform(np.clip(feature_mat, a_min=None, a_max=ISTELLA_MAX))
                    else:
                        feature_mat = scaler.fit_transform(feature_mat)
                Q = clip_query_data(qid=qid, feature_mat=feature_mat, std_label_vec=np.array(list_labels_per_q),
                                    binary_rele=binary_rele, unknown_as_zero=unknown_as_zero, clip_query=clip_query,
                                    min_docs=min_docs, min_rele=min_rele, presort=presort)
                if Q is not None:
                    list_Qs.append(Q)
        else:
            if data_dict['data_id'] in YAHOO_LTR:
                all_features_mat, all_labels_vec, qids = parse_letor(file_obj.readlines(), has_comment=False, one_indexed=False)
            else:
                all_features_mat, all_labels_vec, qids = parse_letor(file_obj.readlines(), has_comment=False)
            for i in range(len(qids)):
                f_vec = all_features_mat[i, :]
                std_s = all_labels_vec[i]
                qid = qids[i]
                if qid in dict_data:
                    dict_data[qid].append((std_s, f_vec))
                else:
                    dict_data[qid] = [(std_s, f_vec)]
            del all_features_mat
            # sequential unique qids
            seen = set()
            seen_add = seen.add
            qids_unique = [x for x in qids if not (x in seen or seen_add(x))]
            for qid in qids_unique:
                tmp = list(zip(*dict_data[qid]))
                list_labels_per_q = tmp[0]
                if data_dict['data_id'] in MSLETOR_LIST:
                    ''' convert the original rank-position into grade-labels '''
                    ranking_size = len(list_labels_per_q)
                    list_labels_per_q = [ranking_size - r for r in list_labels_per_q]
                list_features_per_q = tmp[1]
                feature_mat = np.vstack(list_features_per_q)
                if scale_data:
                    if data_dict['data_id'] in ISTELLA_LTR:
                        # due to the possible extremely large features, e.g., 1.79769313486e+308
                        feature_mat = scaler.fit_transform(np.clip(feature_mat, a_min=None, a_max=ISTELLA_MAX))
                    else:
                        feature_mat = scaler.fit_transform(feature_mat)
                Q = clip_query_data(qid=qid, feature_mat=feature_mat, std_label_vec=np.array(list_labels_per_q),
                                    binary_rele=binary_rele, unknown_as_zero=unknown_as_zero, clip_query=clip_query,
                                    min_docs=min_docs, min_rele=min_rele, presort=presort)
                if Q is not None:
                    list_Qs.append(Q)
    if buffer:
        assert perquery_file is not None
        parent_dir = Path(perquery_file).parent
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
        pickle_save(list_Qs, file=perquery_file)
    return list_Qs
Transforms an iterator of rows to an iterator of queries (i.e., a unit of all the documents and labels associated with the same query). Each query is represented by a (qid, feature_mat, std_label_vec) tuple. :param in_file: :param has_comment: :param query_level_scale: perform query-level scaling, say normalization :param scaler: MinMaxScaler | RobustScaler :param unknown_as_zero: if not labeled, regard the relevance degree as zero :return:
iter_queries
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def pre_allocate_batch(dict_univ_bin, num_docs_per_batch):
    '''
    Based on the expected number of documents to process within a single batch, we merge the queries that have the same number of documents to form a batch
    @param dict_univ_bin: mapping from a unique number of documents to the bin of corresponding query indices
    @param num_docs_per_batch:
    @return:
    '''
    list_batch_inds = []
    if 1 == num_docs_per_batch:  # a simple but time-consuming per-query processing, namely the batch_size is always one
        for univ in dict_univ_bin:
            bin = dict_univ_bin[univ]
            for index in bin:
                single_ind_as_batch = [index]
                list_batch_inds.append(single_ind_as_batch)
        return list_batch_inds
    else:
        for univ in dict_univ_bin:
            bin = dict_univ_bin[univ]
            bin_length = len(bin)
            if univ * bin_length < num_docs_per_batch:  # merge all queries as one batch
                list_batch_inds.append(bin)
            else:
                if univ < num_docs_per_batch:  # split with an approximate value
                    num_inds_per_batch = num_docs_per_batch // univ
                    for i in range(0, bin_length, num_inds_per_batch):
                        sub_bin = bin[i: min(i + num_inds_per_batch, bin_length)]
                        list_batch_inds.append(sub_bin)
                else:  # one single query as a batch
                    for index in bin:
                        single_ind_as_batch = [index]
                        list_batch_inds.append(single_ind_as_batch)
        return list_batch_inds
Based on the expected number of documents to process within a single batch, we merge the queries that have the same number of documents to form a batch @param dict_univ_bin: mapping from a unique number of documents to the bin of corresponding query indices @param num_docs_per_batch: @return:
pre_allocate_batch
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
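A small worked example of the batching policy (the indices are hypothetical dataset positions):

# three queries with 3 docs each, two queries with 50 docs each
dict_univ_bin = {3: [0, 2, 5], 50: [1, 3]}
print(pre_allocate_batch(dict_univ_bin, num_docs_per_batch=60))
# [[0, 2, 5], [1], [3]]
# 3 * 3 = 9 < 60, so all 3-doc queries merge into one batch;
# 50 * 2 = 100 >= 60 and 60 // 50 = 1, so each 50-doc query becomes its own batch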
def __init__(self, data_source, percent=.01):
    '''
    @param data_source: dataset to sample from
    @param percent: the fraction of queries to use
    '''
    num_queries = data_source.__len__()
    num_used_queries = int(num_queries * percent)
    self.list_used_inds = list(np.random.permutation(num_queries)[0:num_used_queries])
@param data_source: dataset to sample from @param percent: the ratio of queries to be used
__init__
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def get_buffer_file_name_libsvm(in_file, data_id=None, eval_dict=None, need_group=True):
    """ get absolute paths of data file and group file """
    if data_id in MSLETOR or data_id in MSLRWEB:
        buffer_prefix = in_file.replace('Fold', 'BufferedFold')
        file_buffered_data = buffer_prefix.replace('txt', 'data')
        if need_group: file_buffered_group = buffer_prefix.replace('txt', 'group')
    elif data_id in YAHOO_LTR:
        buffer_prefix = in_file[:in_file.find('.txt')].replace(data_id.lower() + '.', 'Buffered' + data_id + '/')
        file_buffered_data = buffer_prefix + '.data'
        if need_group: file_buffered_group = buffer_prefix + '.group'
    elif data_id in ISTELLA_LTR:
        buffer_prefix = in_file[:in_file.find('.txt')].replace(data_id, 'Buffered_' + data_id)
        file_buffered_data = buffer_prefix + '.data'
        if need_group: file_buffered_group = buffer_prefix + '.group'
    else:
        raise NotImplementedError

    if eval_dict is not None and eval_dict['mask_label']:
        mask_ratio = eval_dict['mask_ratio']
        mask_type = eval_dict['mask_type']
        mask_label_str = '_'.join([mask_type, 'Ratio', '{:,g}'.format(mask_ratio)])
        file_buffered_data = file_buffered_data.replace('.data', '_'+mask_label_str+'.data')
        if need_group: # guard: file_buffered_group is only defined when a group file is required
            file_buffered_group = file_buffered_group.replace('.group', '_'+mask_label_str+'.group')

    if need_group:
        return file_buffered_data, file_buffered_group
    else:
        return file_buffered_data
get absolute paths of data file and group file
get_buffer_file_name_libsvm
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def letor_to_libsvm(doc_reprs=None, doc_labels=None, output_feature=None, output_group=None, need_group=False): ''' convert query-level letor-data to libsvm data ''' num_docs = doc_reprs.shape[0] if need_group: output_group.write(str(num_docs) + "\n") # group file for i in range(num_docs): # per document only include nonzero features feats = doc_reprs[i, :].tolist() libsvm_feats = [] for key, val in enumerate(feats): if val != 0.0: libsvm_feats.append(':'.join([str(key+1), str(val)])) output_feature.write(str(doc_labels[i]) + " " + " ".join(libsvm_feats) + "\n")
convert query-level letor-data to libsvm data
letor_to_libsvm
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
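A minimal self-contained check of the conversion for one toy query, writing to in-memory buffers; letor_to_libsvm from the record above is assumed to be in scope.

import io
import numpy as np

# Toy query: 3 documents with 4 features each; zero-valued features are skipped.
doc_reprs = np.array([[0.1, 0.0, 0.3, 0.0],
                      [0.0, 0.2, 0.0, 0.4],
                      [0.5, 0.0, 0.0, 0.0]])
doc_labels = np.array([2, 0, 1])

output_feature, output_group = io.StringIO(), io.StringIO()
letor_to_libsvm(doc_reprs=doc_reprs, doc_labels=doc_labels,
                output_feature=output_feature, output_group=output_group, need_group=True)

print(output_group.getvalue())    # "3" -> number of documents in this query
print(output_feature.getvalue())  # first row: "2 1:0.1 3:0.3" (label, then 1-indexed nonzero features)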
def load_letor_data_as_libsvm_data(in_file, split_type=None, data_id=None, min_docs=None, min_rele=None, data_dict=None, eval_dict=None, need_group=True, presort=None, scaler_id=None):
    """
    Load data by first converting LETOR data into LibSVM format
    :param in_file:
    :param min_docs:
    :param min_rele:
    :param data_id:
    :param eval_dict:
    :param need_group: required w.r.t. xgboost, lightgbm
    :return:
    """
    assert data_id is not None or data_dict is not None
    if data_dict is None:
        scale_data, scaler_id, scaler_level = get_scaler_setting(data_id=data_id, scaler_id=scaler_id)
        data_dict = dict(data_id=data_id, min_docs=min_docs, min_rele=min_rele, binary_rele=False,
                         unknown_as_zero=False, scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)
        data_meta = get_data_meta(data_id=data_id)
        data_dict.update(data_meta)
    elif data_id is None:
        data_id = data_dict['data_id']

    if need_group:
        file_buffered_data, file_buffered_group = get_buffer_file_name_libsvm(in_file, data_id=data_id, eval_dict=eval_dict, need_group=True)
    else:
        file_buffered_data = get_buffer_file_name_libsvm(in_file, data_id=data_id, eval_dict=eval_dict, need_group=False)

    if os.path.exists(file_buffered_data):
        if need_group:
            return file_buffered_data, file_buffered_group
        else:
            return file_buffered_data
    else:
        parent_dir = Path(file_buffered_data).parent
        if not os.path.exists(parent_dir): os.makedirs(parent_dir)
        output_feature = open(file_buffered_data, "w")
        # the group file is only opened when required; otherwise letor_to_libsvm ignores it
        output_group = open(file_buffered_group, "w") if need_group else None

        perquery_file = get_buffer_file_name(data_id=data_id, file=in_file, data_dict=data_dict, presort=presort)
        list_Qs = iter_queries(in_file=in_file, data_dict=data_dict, scale_data=data_dict['scale_data'],
                               scaler_id=data_dict['scaler_id'], perquery_file=perquery_file, buffer=True, presort=presort)

        if eval_dict is not None and eval_dict['mask_label'] and split_type==SPLIT_TYPE.Train:
            if MASK_TYPE.rand_mask_rele == MASK_TYPE[eval_dict['mask_type']]:
                for qid, doc_reprs, doc_labels in list_Qs:
                    doc_labels = np_random_mask_rele_labels(batch_label=doc_labels, mask_ratio=eval_dict['mask_ratio'], mask_value=0)
                    if doc_labels is not None:
                        # np.int is deprecated in recent numpy releases; the builtin int is equivalent here
                        letor_to_libsvm(doc_reprs=doc_reprs.astype(np.float32), doc_labels=doc_labels.astype(int),
                                        output_feature=output_feature, output_group=output_group, need_group=need_group)
            elif MASK_TYPE.rand_mask_all == MASK_TYPE[eval_dict['mask_type']]:
                for qid, doc_reprs, doc_labels in list_Qs:
                    doc_labels = np_random_mask_all_labels(batch_label=doc_labels, mask_ratio=eval_dict['mask_ratio'], mask_value=0)
                    if doc_labels is not None:
                        letor_to_libsvm(doc_reprs=doc_reprs.astype(np.float32), doc_labels=doc_labels.astype(int),
                                        output_feature=output_feature, output_group=output_group, need_group=need_group)
            else:
                raise NotImplementedError
        else:
            for qid, doc_reprs, doc_labels in list_Qs:
                letor_to_libsvm(doc_reprs=doc_reprs.astype(np.float32), doc_labels=doc_labels.astype(int),
                                output_feature=output_feature, output_group=output_group, need_group=need_group)

        if need_group: output_group.close()
        output_feature.close()

    if need_group:
        return file_buffered_data, file_buffered_group
    else:
        return file_buffered_data
Load data by first converting LETOR data into LibSVM format :param in_file: :param min_docs: :param min_rele: :param data_id: :param eval_dict: :param need_group: required w.r.t. xgboost, lightgbm :return:
load_letor_data_as_libsvm_data
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
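The buffered .data/.group files follow the LibSVM convention, so they can be read back with, for instance, scikit-learn; a sketch with hypothetical paths:

from sklearn.datasets import load_svmlight_file

# Hypothetical paths as returned by load_letor_data_as_libsvm_data.
file_data, file_group = 'BufferedFold1/train.data', 'BufferedFold1/train.group'

X, y = load_svmlight_file(file_data)    # sparse feature matrix and label vector
with open(file_group) as f:
    group = [int(line) for line in f]   # number of documents per query, in order

assert sum(group) == X.shape[0]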
def random_mask_all_labels(batch_ranking, batch_label, mask_ratio, mask_value=0, presort=False):
    '''
    Mask the ground-truth labels with the specified ratio as '0'. Meanwhile, re-sort according to the labels if required.
    :param batch_ranking: the feature representations of the documents within a ranking
    :param batch_label: the ground-truth labels of the documents within a ranking
    :param mask_ratio: the ratio of labels to be masked
    :param mask_value:
    :param presort:
    :return:
    '''
    size_ranking = batch_label.size(1)
    num_to_mask = int(size_ranking*mask_ratio)
    mask_ind = np.random.choice(size_ranking, size=num_to_mask, replace=False)

    batch_label[:, mask_ind] = mask_value

    if torch.gt(batch_label, torch_zero).any(): # whether the masked one includes explicit positive labels
        if presort: # re-sort according to the labels if required
            std_labels = torch.squeeze(batch_label)
            sorted_labels, sorted_inds = torch.sort(std_labels, descending=True)

            batch_label = torch.unsqueeze(sorted_labels, dim=0)
            batch_ranking = batch_ranking[:, sorted_inds, :]

        return batch_ranking, batch_label
    else:
        return None
Mask the ground-truth labels with the specified ratio as '0'. Meanwhile, re-sort according to the labels if required. :param batch_ranking: the feature representations of the documents within a ranking :param batch_label: the ground-truth labels of the documents within a ranking :param mask_ratio: the ratio of labels to be masked :param mask_value: :param presort: :return:
random_mask_all_labels
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
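A toy run on a single ranking; a sketch assuming the module-level torch_zero constant (a zero tensor) is defined, as in data_utils.

import torch

batch_ranking = torch.rand(1, 10, 46)               # [batch, ranking_size, num_features]
batch_label = torch.randint(0, 3, (1, 10)).float()  # graded relevance labels

out = random_mask_all_labels(batch_ranking, batch_label, mask_ratio=0.3, presort=True)
if out is None:
    print('masking removed every positive label; the query would be skipped')
else:
    masked_ranking, masked_label = out
    print(masked_label)  # re-sorted in descending label order since presort=True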
def random_mask_rele_labels(batch_ranking, batch_label=None, mask_ratio=None, mask_value=0, presort=False):
    '''
    Mask the ground-truth labels with the specified ratio as '0'. Meanwhile, re-sort according to the labels if required.
    :param batch_ranking: the feature representations of the documents within a ranking
    :param batch_label: the ground-truth labels of the documents within a ranking
    :param mask_ratio: the ratio of labels to be masked
    :param mask_value:
    :param presort:
    :return:
    '''
    assert 1 == batch_label.size(0) # todo for larger batch-size, need per-dimension masking

    # squeeze for easy processing
    docs, labels = torch.squeeze(batch_ranking, dim=0), torch.squeeze(batch_label)

    all_rele_inds = torch.gt(labels, torch_zero).nonzero()
    num_rele = all_rele_inds.size()[0]

    num_to_mask = int(num_rele*mask_ratio)
    mask_inds = np.random.choice(num_rele, size=num_to_mask, replace=False)

    rele_inds_to_mask = all_rele_inds[mask_inds, 0] # the 0-column corresponds to the original rele index since all_rele_inds.size()=(num_rele, 1)

    batch_label[:, rele_inds_to_mask] = mask_value

    if torch.gt(batch_label, torch_zero).any(): # whether the masked one includes explicit positive labels
        if presort: # re-sort according to the labels if required
            std_labels = torch.squeeze(batch_label)
            sorted_labels, sorted_inds = torch.sort(std_labels, descending=True)

            batch_label = torch.unsqueeze(sorted_labels, dim=0)
            batch_ranking = batch_ranking[:, sorted_inds, :]

        return batch_ranking, batch_label
    else: # only supports enough rele labels
        raise NotImplementedError
Mask the ground-truth labels with the specified ratio as '0'. Meanwhile, re-sort according to the labels if required. :param batch_ranking: the feature representations of the documents within a ranking :param batch_label: the ground-truth labels of the documents within a ranking :param mask_ratio: the ratio of labels to be masked :param mask_value: :param presort: :return:
random_mask_rele_labels
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def np_random_mask_all_labels(batch_label, mask_ratio, mask_value=0): ''' Mask the ground-truth labels with the specified ratio as '0'. ''' size_ranking = len(batch_label) num_to_mask = int(size_ranking*mask_ratio) mask_ind = np.random.choice(size_ranking, size=num_to_mask, replace=False) batch_label[mask_ind] = mask_value if np.greater(batch_label, 0.0).any(): # whether the masked one includes explicit positive labels return batch_label else: return None
Mask the ground-truth labels with the specified ratio as '0'.
np_random_mask_all_labels
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
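The numpy variant operates on a 1-D label vector; a quick self-contained check:

import numpy as np

labels = np.array([2, 1, 0, 1, 0, 2, 0, 0, 1, 0], dtype=float)

masked = np_random_mask_all_labels(labels.copy(), mask_ratio=0.5, mask_value=0)
if masked is None:
    print('masking removed every positive label; the query would be dropped')
else:
    print(masked)  # 5 of the 10 positions were set to zero (some may already be zero)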
def np_random_mask_rele_labels(batch_label, mask_ratio, mask_value=0):
    '''
    Mask the ground-truth labels with the specified ratio as '0'.
    '''
    all_rele_inds = np.greater(batch_label, 0).nonzero()[0] # due to one-dimension
    #print('all_rele_inds', all_rele_inds)
    num_rele = all_rele_inds.shape[0]
    #print('num_rele', num_rele)

    num_to_mask = int(num_rele*mask_ratio)
    mask_inds = np.random.choice(num_rele, size=num_to_mask, replace=False)
    #print('mask_inds', mask_inds)

    rele_inds_to_mask = all_rele_inds[mask_inds]
    #print('rele_inds_to_mask', rele_inds_to_mask)

    batch_label[rele_inds_to_mask] = mask_value

    if np.greater(batch_label, 0.0).any(): # whether the masked one includes explicit positive labels
        return batch_label
    else:
        return None
Mask the ground-truth labels with the specified ratio as '0'.
np_random_mask_rele_labels
python
wildltr/ptranking
ptranking/data/data_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/data/data_utils.py
MIT
def display_information(self, data_dict, model_para_dict, reproduce=False): """ Display some information. :param data_dict: :param model_para_dict: :return: """ if self.gpu: print('-- GPU({}) is launched --'.format(self.device)) else: print('Only CPU is used.') if reproduce: print(' '.join(['\nReproducing results for {} on {} >>>'.format(model_para_dict['model_id'], data_dict['data_id'])])) else: print(' '.join(['\nStart {} on {} >>>'.format(model_para_dict['model_id'], data_dict['data_id'])]))
Display some information. :param data_dict: :param model_para_dict: :return:
display_information
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def check_consistency(self, data_dict, eval_dict, sf_para_dict):
    """ Check whether the settings are reasonable in the context of adhoc learning-to-rank """
    ''' Part-1: data loading '''
    if data_dict['data_id'] == 'Istella':
        assert eval_dict['do_validation'] is not True # since there is no validation data

    if data_dict['data_id'] in MSLETOR_SEMI:
        assert data_dict['train_presort'] is not True # due to the non-labeled documents
        if data_dict['binary_rele']: # for semi-supervised datasets, binarization is required due to '-1' labels
            assert data_dict['unknown_as_zero']
    else:
        assert data_dict['unknown_as_zero'] is not True # since there are no unlabeled documents

    if data_dict['data_id'] in MSLETOR_LIST: # for which the standard ranking of each query is unique
        assert 1 == data_dict['train_batch_size']

    if data_dict['scale_data']:
        scaler_level = data_dict['scaler_level'] if 'scaler_level' in data_dict else None
        assert not scaler_level == 'DATASET' # not a supported setting

    assert data_dict['validation_presort'] # rule-of-thumb setting for adhoc learning-to-rank
    assert data_dict['test_presort'] # rule-of-thumb setting for adhoc learning-to-rank

    ''' Part-2: evaluation setting '''
    if eval_dict['mask_label']: # True is aimed to use supervised data to mimic semi-supervised data by masking
        assert not data_dict['data_id'] in MSLETOR_SEMI
Check whether the settings are reasonable in the context of adhoc learning-to-rank
check_consistency
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def determine_files(self, data_dict, fold_k=None): """ Determine the file path correspondingly. :param data_dict: :param fold_k: :return: """ if data_dict['data_id'] in YAHOO_LTR: file_train, file_vali, file_test = os.path.join(data_dict['dir_data'], data_dict['data_id'].lower() + '.train.txt'),\ os.path.join(data_dict['dir_data'], data_dict['data_id'].lower() + '.valid.txt'),\ os.path.join(data_dict['dir_data'], data_dict['data_id'].lower() + '.test.txt') elif data_dict['data_id'] in ISTELLA_LTR: if data_dict['data_id'] == 'Istella_X' or data_dict['data_id']=='Istella_S': file_train, file_vali, file_test = data_dict['dir_data'] + 'train.txt', data_dict['dir_data'] + 'vali.txt', data_dict['dir_data'] + 'test.txt' else: file_vali = None file_train, file_test = data_dict['dir_data'] + 'train.txt', data_dict['dir_data'] + 'test.txt' else: print('Fold-', fold_k) fold_k_dir = data_dict['dir_data'] + 'Fold' + str(fold_k) + '/' file_train, file_vali, file_test = fold_k_dir + 'train.txt', fold_k_dir + 'vali.txt', fold_k_dir + 'test.txt' return file_train, file_vali, file_test
Determine the file path correspondingly. :param data_dict: :param fold_k: :return:
determine_files
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def load_data(self, eval_dict, data_dict, fold_k): """ Load the dataset correspondingly. :param eval_dict: :param data_dict: :param fold_k: :param model_para_dict: :return: """ file_train, file_vali, file_test = self.determine_files(data_dict, fold_k=fold_k) input_eval_dict = eval_dict if eval_dict['mask_label'] else None # required when enabling masking data _train_data = LTRDataset(file=file_train, split_type=SPLIT_TYPE.Train, presort=data_dict['train_presort'], data_dict=data_dict, eval_dict=input_eval_dict) train_letor_sampler = LETORSampler(data_source=_train_data, rough_batch_size=data_dict['train_rough_batch_size']) train_loader = torch.utils.data.DataLoader(_train_data, batch_sampler=train_letor_sampler, num_workers=0) _test_data = LTRDataset(file=file_test, split_type=SPLIT_TYPE.Test, data_dict=data_dict, presort=data_dict['test_presort']) test_letor_sampler = LETORSampler(data_source=_test_data, rough_batch_size=data_dict['test_rough_batch_size']) test_loader = torch.utils.data.DataLoader(_test_data, batch_sampler=test_letor_sampler, num_workers=0) if eval_dict['do_validation'] or eval_dict['do_summary']: # vali_data is required _vali_data = LTRDataset(file=file_vali, split_type=SPLIT_TYPE.Validation, data_dict=data_dict, presort=data_dict['validation_presort']) vali_letor_sampler = LETORSampler(data_source=_vali_data, rough_batch_size=data_dict['validation_rough_batch_size']) vali_loader = torch.utils.data.DataLoader(_vali_data, batch_sampler=vali_letor_sampler, num_workers=0) else: vali_loader = None return train_loader, test_loader, vali_loader
Load the dataset correspondingly. :param eval_dict: :param data_dict: :param fold_k: :param model_para_dict: :return:
load_data
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def load_ranker(self, sf_para_dict, model_para_dict): """ Load a ranker correspondingly :param sf_para_dict: :param model_para_dict: :param kwargs: :return: """ model_id = model_para_dict['model_id'] if model_id in ['RankMSE', 'ListMLE', 'ListNet', 'RankCosine', 'DASALC', 'HistogramAP']: ranker = globals()[model_id](sf_para_dict=sf_para_dict, gpu=self.gpu, device=self.device) elif model_id in ['RankNet', 'LambdaRank', 'STListNet', 'ApproxNDCG', 'DirectOpt', 'LambdaLoss', 'MarginLambdaLoss', 'MDPRank', 'ExpectedUtility', 'MDNExpectedUtility', 'RankingMDN', 'SoftRank', 'TwinRank']: ranker = globals()[model_id](sf_para_dict=sf_para_dict, model_para_dict=model_para_dict, gpu=self.gpu, device=self.device) elif model_id == 'WassRank': ranker = WassRank(sf_para_dict=sf_para_dict, wass_para_dict=model_para_dict, dict_cost_mats=self.dict_cost_mats, dict_std_dists=self.dict_std_dists, gpu=self.gpu, device=self.device) else: raise NotImplementedError return ranker
Load a ranker correspondingly :param sf_para_dict: :param model_para_dict: :param kwargs: :return:
load_ranker
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def setup_output(self, data_dict=None, eval_dict=None): """ Update output directory :param data_dict: :param eval_dict: :param sf_para_dict: :param model_para_dict: :return: """ model_id = self.model_parameter.model_id grid_search, do_vali, dir_output = eval_dict['grid_search'], eval_dict['do_validation'], eval_dict['dir_output'] mask_label = eval_dict['mask_label'] if grid_search: dir_root = dir_output + '_'.join(['gpu', 'grid', model_id]) + '/' if self.gpu else dir_output + '_'.join(['grid', model_id]) + '/' else: dir_root = dir_output eval_dict['dir_root'] = dir_root if not os.path.exists(dir_root): os.makedirs(dir_root) sf_str = self.sf_parameter.to_para_string() data_eval_str = '_'.join([self.data_setting.to_data_setting_string(), self.eval_setting.to_eval_setting_string()]) if mask_label: data_eval_str = '_'.join([data_eval_str, 'MaskLabel', 'Ratio', '{:,g}'.format(eval_dict['mask_ratio'])]) file_prefix = '_'.join([model_id, 'SF', sf_str, data_eval_str]) if data_dict['scale_data']: if data_dict['scaler_level'] == 'QUERY': file_prefix = '_'.join([file_prefix, 'QS', data_dict['scaler_id']]) else: file_prefix = '_'.join([file_prefix, 'DS', data_dict['scaler_id']]) dir_run = dir_root + file_prefix + '/' # run-specific outputs model_para_string = self.model_parameter.to_para_string() if len(model_para_string) > 0: dir_run = dir_run + model_para_string + '/' eval_dict['dir_run'] = dir_run if not os.path.exists(dir_run): os.makedirs(dir_run) return dir_run
Update output directory :param data_dict: :param eval_dict: :param sf_para_dict: :param model_para_dict: :return:
setup_output
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def setup_eval(self, data_dict, eval_dict, sf_para_dict, model_para_dict): """ Finalize the evaluation setting correspondingly :param data_dict: :param eval_dict: :param sf_para_dict: :param model_para_dict: :return: """ sf_para_dict[sf_para_dict['sf_id']].update(dict(num_features=data_dict['num_features'])) self.dir_run = self.setup_output(data_dict, eval_dict) if eval_dict['do_log'] and not self.eval_setting.debug: time_str = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M") sys.stdout = open(self.dir_run + '_'.join(['log', time_str]) + '.txt', "w") #if self.do_summary: self.summary_writer = SummaryWriter(self.dir_run + 'summary') if not model_para_dict['model_id'] in ['MDPRank', 'ExpectedUtility', 'WassRank']: """ Aiming for efficient batch processing, please use a large batch_size, e.g., {train_rough_batch_size, validation_rough_batch_size, test_rough_batch_size = 300, 300, 300} """ #assert data_dict['train_rough_batch_size'] > 1
Finalize the evaluation setting correspondingly :param data_dict: :param eval_dict: :param sf_para_dict: :param model_para_dict: :return:
setup_eval
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def log_max(self, data_dict=None, max_cv_avg_scores=None, sf_para_dict=None, eval_dict=None, log_para_str=None): ''' Log the best performance across grid search and the corresponding setting ''' dir_root, cutoffs = eval_dict['dir_root'], eval_dict['cutoffs'] data_id = data_dict['data_id'] sf_str = self.sf_parameter.to_para_string(log=True) data_eval_str = self.data_setting.to_data_setting_string(log=True) +'\n'+ self.eval_setting.to_eval_setting_string(log=True) with open(file=dir_root + '/' + '_'.join([data_id, sf_para_dict['sf_id'], 'max.txt']), mode='w') as max_writer: max_writer.write('\n\n'.join([data_eval_str, sf_str, log_para_str, metric_results_to_string(max_cv_avg_scores, cutoffs, metric='nDCG')]))
Log the best performance across grid search and the corresponding setting
log_max
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def kfold_cv_eval(self, data_dict=None, eval_dict=None, sf_para_dict=None, model_para_dict=None):
    """
    Evaluate learning-to-rank methods via k-fold cross validation if there are k folds, otherwise one fold.
    :param data_dict: settings w.r.t. data
    :param eval_dict: settings w.r.t. evaluation
    :param sf_para_dict: settings w.r.t. scoring function
    :param model_para_dict: settings w.r.t. the ltr_adhoc model
    :return:
    """
    self.display_information(data_dict, model_para_dict)
    self.check_consistency(data_dict, eval_dict, sf_para_dict)

    ranker = self.load_ranker(model_para_dict=model_para_dict, sf_para_dict=sf_para_dict)
    ranker.uniform_eval_setting(eval_dict=eval_dict)

    self.setup_eval(data_dict, eval_dict, sf_para_dict, model_para_dict)

    model_id = model_para_dict['model_id']
    fold_num, label_type, max_label = data_dict['fold_num'], data_dict['label_type'], data_dict['max_rele_level']
    train_presort, validation_presort, test_presort = \
        data_dict['train_presort'], data_dict['validation_presort'], data_dict['test_presort']

    # for quick access of common evaluation settings
    epochs, loss_guided = eval_dict['epochs'], eval_dict['loss_guided']
    vali_k, log_step, cutoffs = eval_dict['vali_k'], eval_dict['log_step'], eval_dict['cutoffs']
    do_vali, vali_metric, do_summary = eval_dict['do_validation'], eval_dict['vali_metric'], eval_dict['do_summary']

    cv_tape = CVTape(model_id=model_id, fold_num=fold_num, cutoffs=cutoffs, do_validation=do_vali)

    for fold_k in range(1, fold_num + 1): # evaluation over k-fold data
        ranker.init() # initialize or reset with the same random initialization

        train_data, test_data, vali_data = self.load_data(eval_dict, data_dict, fold_k)

        if do_vali: vali_tape = ValidationTape(fold_k=fold_k, num_epochs=epochs, validation_metric=vali_metric,
                                               validation_at_k=vali_k, dir_run=self.dir_run)
        if do_summary: summary_tape = SummaryTape(do_validation=do_vali, cutoffs=cutoffs, label_type=label_type,
                                                  train_presort=train_presort, test_presort=test_presort, gpu=self.gpu)
        if not do_vali and loss_guided: opt_loss_tape = OptLossTape(gpu=self.gpu)

        for epoch_k in range(1, epochs + 1):
            torch_fold_k_epoch_k_loss, stop_training = ranker.train(train_data=train_data, epoch_k=epoch_k,
                                                                    presort=train_presort, label_type=label_type)
            ranker.scheduler.step() # adaptive learning rate with step_size=40, gamma=0.5
            if stop_training:
                print('training failed!')
                break

            if (do_summary or do_vali) and (epoch_k % log_step == 0 or epoch_k == 1): # stepwise check
                if do_vali: # per-step validation score
                    torch_vali_metric_value = ranker.validation(vali_data=vali_data, k=vali_k, device='cpu',
                                                                vali_metric=vali_metric, label_type=label_type,
                                                                max_label=max_label, presort=validation_presort)
                    vali_metric_value = torch_vali_metric_value.squeeze(-1).data.numpy()
                    vali_tape.epoch_validation(ranker=ranker, epoch_k=epoch_k, metric_value=vali_metric_value)
                if do_summary: # summarize per-step performance w.r.t. train, test
                    summary_tape.epoch_summary(ranker=ranker, torch_epoch_k_loss=torch_fold_k_epoch_k_loss,
                                               train_data=train_data, test_data=test_data,
                                               vali_metric_value=vali_metric_value if do_vali else None)
            elif loss_guided: # stopping check via epoch-loss
                early_stopping = opt_loss_tape.epoch_cmp_loss(fold_k=fold_k, epoch_k=epoch_k,
                                                              torch_epoch_k_loss=torch_fold_k_epoch_k_loss)
                if early_stopping: break

        if do_summary: # track
            summary_tape.fold_summary(fold_k=fold_k, dir_run=self.dir_run, train_data_length=train_data.__len__())

        if do_vali: # using the fold-wise optimal model for later testing based on validation data
            ranker.load(vali_tape.get_optimal_path(), device=self.device)
            vali_tape.clear_fold_buffer(fold_k=fold_k)
        else: # buffer the model after a fixed number of training epochs if no validation is deployed
            fold_optimal_checkpoint = '-'.join(['Fold', str(fold_k)])
            ranker.save(dir=self.dir_run + fold_optimal_checkpoint + '/',
                        name='_'.join(['net_params_epoch', str(epoch_k)]) + '.pkl')

        cv_tape.fold_evaluation(model_id=model_id, ranker=ranker, test_data=test_data, max_label=max_label, fold_k=fold_k)

    ndcg_cv_avg_scores = cv_tape.get_cv_performance()
    return ndcg_cv_avg_scores
Evaluate learning-to-rank methods via k-fold cross validation if there are k folds, otherwise one fold. :param data_dict: settings w.r.t. data :param eval_dict: settings w.r.t. evaluation :param sf_para_dict: settings w.r.t. scoring function :param model_para_dict: settings w.r.t. the ltr_adhoc model :return:
kfold_cv_eval
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def naive_train(self, ranker, eval_dict, train_data=None, test_data=None): """ A simple train and test, namely train based on training data & test based on testing data :param ranker: :param eval_dict: :param train_data: :param test_data: :param vali_data: :return: """ ranker.reset_parameters() # reset with the same random initialization assert train_data is not None assert test_data is not None list_losses = [] list_train_ndcgs = [] list_test_ndcgs = [] epochs, cutoffs = eval_dict['epochs'], eval_dict['cutoffs'] for i in range(epochs): epoch_loss = torch.zeros(1).to(self.device) if self.gpu else torch.zeros(1) for qid, batch_rankings, batch_stds in train_data: if self.gpu: batch_rankings, batch_stds = batch_rankings.to(self.device), batch_stds.to(self.device) batch_loss, stop_training = ranker.train(batch_rankings, batch_stds, qid=qid) epoch_loss += batch_loss.item() np_epoch_loss = epoch_loss.cpu().numpy() if self.gpu else epoch_loss.data.numpy() list_losses.append(np_epoch_loss) test_ndcg_ks = ranker.ndcg_at_ks(test_data=test_data, ks=cutoffs, label_type=LABEL_TYPE.MultiLabel, device='cpu') np_test_ndcg_ks = test_ndcg_ks.data.numpy() list_test_ndcgs.append(np_test_ndcg_ks) train_ndcg_ks = ranker.ndcg_at_ks(test_data=train_data, ks=cutoffs, label_type=LABEL_TYPE.MultiLabel, device='cpu') np_train_ndcg_ks = train_ndcg_ks.data.numpy() list_train_ndcgs.append(np_train_ndcg_ks) test_ndcgs = np.vstack(list_test_ndcgs) train_ndcgs = np.vstack(list_train_ndcgs) return list_losses, train_ndcgs, test_ndcgs
A simple train and test, namely train based on training data & test based on testing data :param ranker: :param eval_dict: :param train_data: :param test_data: :param vali_data: :return:
naive_train
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def set_model_setting(self, model_id=None, dir_json=None, debug=False): """ Initialize the parameter class for a specified model :param debug: :param model_id: :return: """ if model_id in ['RankMSE', 'ListMLE', 'ListNet', 'RankCosine', 'DASALC', 'HistogramAP']: # ModelParameter is sufficient self.model_parameter = ModelParameter(model_id=model_id) else: if dir_json is not None: para_json = dir_json + model_id + "Parameter.json" self.model_parameter = globals()[model_id + "Parameter"](para_json=para_json) else: # the 3rd type, where debug-mode enables quick test self.model_parameter = globals()[model_id + "Parameter"](debug=debug)
Initialize the parameter class for a specified model :param debug: :param model_id: :return:
set_model_setting
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def declare_global(self, model_id=None):
    """
    Declare global variables if required, such as for efficiency
    :param model_id:
    :return:
    """
    if model_id == 'WassRank': # global buffering across a number of runs with different model parameters
        self.dict_cost_mats, self.dict_std_dists = dict(), dict()
Declare global variables if required, such as for efficiency :param model_id: :return:
declare_global
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def point_run(self, debug=False, model_id=None, sf_id=None, data_id=None, dir_data=None, dir_output=None, dir_json=None, reproduce=False): """ Perform one-time run based on given setting. :param debug: :param model_id: :param data_id: :param dir_data: :param dir_output: :return: """ if dir_json is None: self.set_eval_setting(debug=debug, dir_output=dir_output) self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data) self.set_scoring_function_setting(debug=debug, sf_id=sf_id) self.set_model_setting(debug=debug, model_id=model_id) else: data_eval_sf_json = dir_json + 'Data_Eval_ScoringFunction.json' self.set_eval_setting(eval_json=data_eval_sf_json) self.set_data_setting(data_json=data_eval_sf_json) self.set_scoring_function_setting(sf_json=data_eval_sf_json) self.set_model_setting(model_id=model_id, dir_json=dir_json) data_dict = self.get_default_data_setting() eval_dict = self.get_default_eval_setting() sf_para_dict = self.get_default_scoring_function_setting() model_para_dict = self.get_default_model_setting() self.declare_global(model_id=model_id) if reproduce: self.kfold_cv_reproduce(data_dict=data_dict, eval_dict=eval_dict, model_para_dict=model_para_dict, sf_para_dict=sf_para_dict) else: self.kfold_cv_eval(data_dict=data_dict, eval_dict=eval_dict, model_para_dict=model_para_dict, sf_para_dict=sf_para_dict)
Perform one-time run based on given setting. :param debug: :param model_id: :param data_id: :param dir_data: :param dir_output: :return:
point_run
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
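A sketch of a one-time run; the evaluator class name, data_id and paths below are assumptions, not taken from this record.

# Hypothetical launcher; LTREvaluator, data_id and the paths are assumptions.
evaluator = LTREvaluator()
evaluator.point_run(debug=True, model_id='LambdaRank', sf_id='pointsf',
                    data_id='MQ2008_Super', dir_data='/data/MQ2008/',
                    dir_output='/output/')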
def grid_run(self, model_id=None, sf_id=None, dir_json=None, debug=False, data_id=None, dir_data=None, dir_output=None): """ Explore the effects of different hyper-parameters of a model based on grid-search :param debug: :param model_id: :param data_id: :param dir_data: :param dir_output: :return: """ if dir_json is not None: data_eval_sf_json = dir_json + 'Data_Eval_ScoringFunction.json' self.set_data_setting(data_json=data_eval_sf_json) self.set_scoring_function_setting(sf_json=data_eval_sf_json) self.set_eval_setting(debug=debug, eval_json=data_eval_sf_json) self.set_model_setting(model_id=model_id, dir_json=dir_json) else: self.set_eval_setting(debug=debug, dir_output=dir_output) self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data) self.set_scoring_function_setting(debug=debug, sf_id=sf_id) self.set_model_setting(debug=debug, model_id=model_id) self.declare_global(model_id=model_id) ''' select the best setting through grid search ''' vali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50] max_cv_avg_scores = np.zeros(len(cutoffs)) # fold average k_index = cutoffs.index(vali_k) max_common_para_dict, max_sf_para_dict, max_model_para_dict = None, None, None for data_dict in self.iterate_data_setting(): for eval_dict in self.iterate_eval_setting(): assert self.eval_setting.check_consistence(vali_k=vali_k, cutoffs=cutoffs) # a necessary consistence for sf_para_dict in self.iterate_scoring_function_setting(): for model_para_dict in self.iterate_model_setting(): curr_cv_avg_scores = self.kfold_cv_eval(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict, model_para_dict=model_para_dict) if curr_cv_avg_scores[k_index] > max_cv_avg_scores[k_index]: max_cv_avg_scores, max_sf_para_dict, max_eval_dict, max_model_para_dict = \ curr_cv_avg_scores, sf_para_dict, eval_dict, model_para_dict # log max setting self.log_max(data_dict=data_dict, eval_dict=max_eval_dict, max_cv_avg_scores=max_cv_avg_scores, sf_para_dict=max_sf_para_dict, log_para_str=self.model_parameter.to_para_string(log=True, given_para_dict=max_model_para_dict))
Explore the effects of different hyper-parameters of a model based on grid-search :param debug: :param model_id: :param data_id: :param dir_data: :param dir_output: :return:
grid_run
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/ltr.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/ltr.py
MIT
def load_para_json(self, para_json): """ load json file of parameter setting """ with open(para_json) as json_file: json_dict = json.load(json_file) return json_dict
load json file of parameter setting
load_para_json
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def default_pointsf_para_dict(self): """ A default setting of the hyper-parameters of the stump neural scoring function. """ # common default settings for a scoring function based on feed-forward neural networks self.sf_para_dict = dict() if self.use_json: opt = self.json_dict['opt'][0] lr = self.json_dict['lr'][0] pointsf_json_dict = self.json_dict[self.sf_id] num_layers = pointsf_json_dict['layers'][0] af = pointsf_json_dict['AF'][0] tl_af = pointsf_json_dict['TL_AF'][0] apply_tl_af = pointsf_json_dict['apply_tl_af'][0] BN = pointsf_json_dict['BN'][0] bn_type = pointsf_json_dict['bn_type'][0] bn_affine = pointsf_json_dict['bn_affine'][0] self.sf_para_dict['opt'] = opt self.sf_para_dict['lr'] = lr pointsf_para_dict = dict(num_layers=num_layers, AF=af, TL_AF=tl_af, apply_tl_af=apply_tl_af, BN=BN, bn_type=bn_type, bn_affine=bn_affine) self.sf_para_dict['sf_id'] = self.sf_id self.sf_para_dict[self.sf_id] = pointsf_para_dict else: self.sf_para_dict['opt'] = 'Adam' # Adam | RMS | Adagrad self.sf_para_dict['lr'] = 0.0001 # learning rate pointsf_para_dict = dict(num_layers=5, AF='GE', TL_AF='S', apply_tl_af=True, BN=True, bn_type='BN', bn_affine=True) self.sf_para_dict['sf_id'] = self.sf_id self.sf_para_dict[self.sf_id] = pointsf_para_dict return self.sf_para_dict
A default setting of the hyper-parameters of the stump neural scoring function.
default_pointsf_para_dict
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def default_listsf_para_dict(self): """ A default setting of the hyper-parameters of the permutation-equivariant neural scoring function. """ self.sf_para_dict = dict() self.sf_para_dict['opt'] = 'Adagrad' # Adam | RMS | Adagrad self.sf_para_dict['lr'] = 0.001 # learning rate listsf_para_dict = dict(ff_dims=[128, 256, 512], AF='R', TL_AF='GE', apply_tl_af=False, BN=False, bn_type='BN2', bn_affine=False, n_heads=2, encoder_layers=6, encoder_type='DASALC') # DASALC, AllRank, AttnDIN self.sf_para_dict['sf_id'] = self.sf_id self.sf_para_dict[self.sf_id] = listsf_para_dict return self.sf_para_dict
A default setting of the hyper-parameters of the permutation-equivariant neural scoring function.
default_listsf_para_dict
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def pointsf_grid_search(self): """ Iterator of hyper-parameters of the stump neural scoring function. """ if self.use_json: choice_opt = self.json_dict['opt'] choice_lr = self.json_dict['lr'] pointsf_json_dict = self.json_dict[self.sf_id] choice_layers = pointsf_json_dict['layers'] choice_af = pointsf_json_dict['AF'] choice_tl_af = pointsf_json_dict['TL_AF'] choice_apply_tl_af = pointsf_json_dict['apply_tl_af'] choice_BN = pointsf_json_dict['BN'] choice_bn_type = pointsf_json_dict['bn_type'] choice_bn_affine = pointsf_json_dict['bn_affine'] else: choice_opt = ['Adam'] choice_lr = [0.001] choice_BN = [True] choice_bn_type = ['BN2'] choice_bn_affine = [False] choice_layers = [3] if self.debug else [5] # 1, 2, 3, 4 choice_af = ['R', 'CE'] if self.debug else ['R', 'CE', 'S'] # ['R', 'LR', 'RR', 'E', 'SE', 'CE', 'S'] choice_tl_af = ['R', 'CE'] if self.debug else ['R', 'CE', 'S'] # ['R', 'LR', 'RR', 'E', 'SE', 'CE', 'S'] choice_apply_tl_af = [True] # True, False for opt, lr in product(choice_opt, choice_lr): sf_para_dict = dict() sf_para_dict['sf_id'] = self.sf_id base_dict = dict(opt=opt, lr=lr) sf_para_dict.update(base_dict) for num_layers, af, apply_tl_af, BN in product(choice_layers, choice_af, choice_apply_tl_af, choice_BN): pointsf_para_dict = dict(num_layers=num_layers, AF=af, apply_tl_af=apply_tl_af, BN=BN) if apply_tl_af: for tl_af in choice_tl_af: pointsf_para_dict.update(dict(TL_AF=tl_af)) if BN: for bn_type, bn_affine in product(choice_bn_type, choice_bn_affine): bn_dict = dict(bn_type=bn_type, bn_affine=bn_affine) pointsf_para_dict.update(bn_dict) sf_para_dict[self.sf_id] = pointsf_para_dict self.sf_para_dict = sf_para_dict yield sf_para_dict else: sf_para_dict[self.sf_id] = pointsf_para_dict self.sf_para_dict = sf_para_dict yield sf_para_dict else: if BN: for bn_type, bn_affine in product(choice_bn_type, choice_bn_affine): bn_dict = dict(bn_type=bn_type, bn_affine=bn_affine) pointsf_para_dict.update(bn_dict) sf_para_dict[self.sf_id] = pointsf_para_dict self.sf_para_dict = sf_para_dict yield sf_para_dict else: sf_para_dict[self.sf_id] = pointsf_para_dict self.sf_para_dict = sf_para_dict yield sf_para_dict
Iterator of hyper-parameters of the stump neural scoring function.
pointsf_grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def pointsf_to_para_string(self, log=False): ''' Get the identifier of scoring function ''' s1, s2 = (':', '\n') if log else ('_', '_') sf_para_dict = self.sf_para_dict[self.sf_id] sf_str_1 = self.get_stacked_FFNet_str(ff_para_dict=sf_para_dict, point=True, log=log, s1=s1, s2=s2) opt, lr = self.sf_para_dict['opt'], self.sf_para_dict['lr'] sf_str_3 = s2.join([s1.join(['Opt', opt]), s1.join(['lr', '{:,g}'.format(lr)])]) if log\ else '_'.join([opt, '{:,g}'.format(lr)]) if log: sf_str = s2.join([sf_str_1, sf_str_3]) else: sf_str = '_'.join([sf_str_1, sf_str_3]) return sf_str
Get the identifier of scoring function
pointsf_to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def listsf_to_para_string(self, log=False): ''' Get the identifier of scoring function ''' s1, s2 = (':', '\n') if log else ('_', '_') sf_para_dict = self.sf_para_dict[self.sf_id] sf_str_1 = self.get_stacked_FFNet_str(ff_para_dict=sf_para_dict, point=False, log=log, s1=s1, s2=s2) sf_str_2 = self.get_encoder_str(ff_para_dict=sf_para_dict, log=log, s1=s1, s2=s2) opt, lr = self.sf_para_dict['opt'], self.sf_para_dict['lr'] sf_str_3 = s2.join([s1.join(['Opt', opt]), s1.join(['lr', '{:,g}'.format(lr)])]) if log\ else '_'.join([opt, '{:,g}'.format(lr)]) if log: sf_str = s2.join([sf_str_1, sf_str_2, sf_str_3]) else: sf_str = '_'.join([sf_str_1, sf_str_2, sf_str_3]) return sf_str
Get the identifier of scoring function
listsf_to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def to_eval_setting_string(self, log=False): """ String identifier of eval-setting :param log: :return: """ eval_dict = self.eval_dict s1, s2 = (':', '\n') if log else ('_', '_') do_vali, epochs = eval_dict['do_validation'], eval_dict['epochs'] if do_vali: vali_metric, vali_k = eval_dict['vali_metric'], eval_dict['vali_k'] vali_str = '@'.join([vali_metric, str(vali_k)]) eval_string = s2.join([s1.join(['epochs', str(epochs)]), s1.join(['validation', vali_str])]) if log \ else s1.join(['EP', str(epochs), 'V', vali_str]) else: eval_string = s1.join(['epochs', str(epochs)]) return eval_string
String identifier of eval-setting :param log: :return:
to_eval_setting_string
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def default_setting(self): """ A default setting for evaluation :param debug: :param data_id: :param dir_output: :return: """ if self.use_json: dir_output = self.json_dict['dir_output'] epochs = self.json_dict['epochs'] # debug is added for a quick check do_validation = self.json_dict['do_validation'] vali_k = self.json_dict['vali_k'] if do_validation else None vali_metric = self.json_dict['vali_metric'] if do_validation else None cutoffs = self.json_dict['cutoffs'] do_log, log_step = self.json_dict['do_log'], self.json_dict['log_step'] do_summary = self.json_dict['do_summary'] loss_guided = self.json_dict['loss_guided'] mask_label = self.json_dict['mask']['mask_label'] mask_type = self.json_dict['mask']['mask_type'] mask_ratio = self.json_dict['mask']['mask_ratio'] self.eval_dict = dict(debug=False, grid_search=False, dir_output=dir_output, cutoffs=cutoffs, do_validation=do_validation, vali_k=vali_k, vali_metric=vali_metric, do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=loss_guided, epochs=epochs, mask_label=mask_label, mask_type=mask_type, mask_ratio=mask_ratio) else: do_log = False if self.debug else True do_validation, do_summary = True, False # checking loss variation log_step = 1 epochs = 5 if self.debug else 100 vali_k = 5 if do_validation else None vali_metric = 'nDCG' if do_validation else None ''' setting for exploring the impact of randomly removing some ground-truth labels ''' mask_label = False mask_type = 'rand_mask_all' mask_ratio = 0.2 # more evaluation settings that are rarely changed self.eval_dict = dict(debug=self.debug, grid_search=False, dir_output=self.dir_output, do_validation=do_validation, vali_k=vali_k, vali_metric=vali_metric, cutoffs=[1, 3, 5, 10, 20, 50], epochs=epochs, do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=False, mask_label=mask_label, mask_type=mask_type, mask_ratio=mask_ratio) return self.eval_dict
A default setting for evaluation :param debug: :param data_id: :param dir_output: :return:
default_setting
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def default_setting(self): """ A default setting for data loading :return: """ if self.use_json: scaler_id = self.json_dict['scaler_id'] min_docs = self.json_dict['min_docs'][0] min_rele = self.json_dict['min_rele'][0] binary_rele = self.json_dict['binary_rele'][0] unknown_as_zero = self.json_dict['unknown_as_zero'][0] tr_batch_size = self.json_dict['tr_batch_size'][0] # train_rough_batch_size scale_data, scaler_id, scaler_level = get_scaler_setting(data_id=self.data_id, scaler_id=scaler_id) # hard-coding for rarely changed settings self.data_dict = dict(data_id=self.data_id, dir_data=self.json_dict["dir_data"], train_presort=True, test_presort=True, validation_presort=True, validation_rough_batch_size=100, test_rough_batch_size=100, min_docs=min_docs, min_rele=min_rele, train_rough_batch_size=tr_batch_size, scale_data = scale_data, scaler_id = scaler_id, scaler_level = scaler_level, unknown_as_zero=unknown_as_zero, binary_rele=binary_rele) else: unknown_as_zero = False # using original labels, e.g., w.r.t. semi-supervised dataset binary_rele = False # using original labels train_presort, validation_presort, test_presort = True, True, True #train_rough_batch_size, validation_rough_batch_size, test_rough_batch_size = 1, 100, 100 train_rough_batch_size, validation_rough_batch_size, test_rough_batch_size = 100, 100, 100 scale_data, scaler_id, scaler_level = get_scaler_setting(data_id=self.data_id) # more data settings that are rarely changed self.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1, scale_data = scale_data, scaler_id = scaler_id, scaler_level = scaler_level, train_presort=train_presort, validation_presort=validation_presort, test_presort=test_presort, train_rough_batch_size=train_rough_batch_size, validation_rough_batch_size=validation_rough_batch_size, test_rough_batch_size=test_rough_batch_size, unknown_as_zero=unknown_as_zero, binary_rele=binary_rele) data_meta = get_data_meta(data_id=self.data_id) # add meta-information if self.debug: data_meta['fold_num'] = 2 self.data_dict.update(data_meta) return self.data_dict
A default setting for data loading :return:
default_setting
python
wildltr/ptranking
ptranking/ltr_adhoc/eval/parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/eval/parameter.py
MIT
def get_approx_ranks(input, alpha=10, device=None): ''' get approximated rank positions: Equation-11 in the paper''' batch_pred_diffs = torch.unsqueeze(input, dim=2) - torch.unsqueeze(input, dim=1) # computing pairwise differences, i.e., Sij or Sxy batch_indicators = robust_sigmoid(torch.transpose(batch_pred_diffs, dim0=1, dim1=2), alpha, device) # using {-1.0*} may lead to a poor performance when compared with the above way; batch_hat_pis = torch.sum(batch_indicators, dim=2) + 0.5 # get approximated rank positions, i.e., hat_pi(x) return batch_hat_pis
get approximated rank positions: Equation-11 in the paper
get_approx_ranks
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/approxNDCG.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/approxNDCG.py
MIT
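A tiny worked example of the rank approximation, with a local stand-in for the module's robust_sigmoid (assumed here to behave like a logistic function with slope alpha):

import torch

def _sigmoid(x, alpha=10.0):
    # stand-in for robust_sigmoid: a logistic function with slope alpha (an assumption)
    return torch.sigmoid(alpha * x)

scores = torch.tensor([[3.0, 1.0, 2.0]])
diffs = scores.unsqueeze(2) - scores.unsqueeze(1)   # pairwise differences s_i - s_j
indicators = _sigmoid(diffs.transpose(1, 2))        # ~1 whenever s_j > s_i
hat_pis = indicators.sum(dim=2) + 0.5               # approximated rank positions
print(hat_pis)  # close to [[1., 3., 2.]] for a sufficiently large alpha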
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs): ''' @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return: ''' label_type = kwargs['label_type'] assert label_type == LABEL_TYPE.MultiLabel if 'presort' in kwargs and kwargs['presort']: target_batch_preds, batch_ideal_rankings = batch_preds, batch_std_labels else: batch_ideal_rankings, batch_ideal_desc_inds = torch.sort(batch_std_labels, dim=1, descending=True) target_batch_preds = torch.gather(batch_preds, dim=1, index=batch_ideal_desc_inds) ''' Given the ideal rankings, the optimization objective is to maximize the approximated nDCG based on differentiable rank positions ''' batch_loss = approxNDCG_loss(batch_preds=target_batch_preds, batch_ideal_rankings=batch_ideal_rankings, alpha=self.alpha, label_type=label_type, device=self.device) self.optimizer.zero_grad() batch_loss.backward() self.optimizer.step() return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/approxNDCG.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/approxNDCG.py
MIT
def to_para_string(self, log=False, given_para_dict=None): """ String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return: """ # using specified para-dict or inner para-dict apxNDCG_para_dict = given_para_dict if given_para_dict is not None else self.apxNDCG_para_dict s1 = ':' if log else '_' apxNDCG_paras_str = s1.join(['Alpha', str(apxNDCG_para_dict['alpha'])]) return apxNDCG_paras_str
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return:
to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/approxNDCG.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/approxNDCG.py
MIT
def grid_search(self): """ Iterator of parameter settings for ApproxNDCG """ if self.use_json: choice_alpha = self.json_dict['alpha'] else: choice_alpha = [10.0] if self.debug else [10.0] # 1.0, 10.0, 50.0, 100.0 for alpha in choice_alpha: self.apxNDCG_para_dict = dict(model_id=self.model_id, alpha=alpha) yield self.apxNDCG_para_dict
Iterator of parameter settings for ApproxNDCG
grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/approxNDCG.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/approxNDCG.py
MIT
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs): ''' The Top-1 approximated ListNet loss, which reduces to a softmax and simple cross entropy. @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return: ''' #print('batch_preds', batch_preds.size()) #print('batch_stds', batch_stds.size()) # todo-as-note: log(softmax(x)), doing these two operations separately is slower, and numerically unstable. # c.f. https://pytorch.org/docs/stable/_modules/torch/nn/functional.html batch_loss = torch.sum(-torch.sum(F.softmax(batch_std_labels, dim=1) * F.log_softmax(batch_preds, dim=1), dim=1)) self.optimizer.zero_grad() batch_loss.backward() self.optimizer.step() return batch_loss
The Top-1 approximated ListNet loss, which reduces to a softmax and simple cross entropy. @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/dasalc.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/dasalc.py
MIT
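Since the loss is just a softmax cross-entropy between the label and prediction distributions, it can be checked in isolation on toy tensors:

import torch
import torch.nn.functional as F

batch_preds = torch.tensor([[2.0, 1.0, 0.5]])       # predicted scores for one query
batch_std_labels = torch.tensor([[2.0, 1.0, 0.0]])  # graded relevance labels

batch_loss = torch.sum(-torch.sum(F.softmax(batch_std_labels, dim=1)
                                  * F.log_softmax(batch_preds, dim=1), dim=1))
print(batch_loss)  # cross entropy of the two Top-1 distributions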
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs): ''' @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return: ''' label_type = kwargs['label_type'] assert label_type == LABEL_TYPE.MultiLabel if 'presort' in kwargs and kwargs['presort']: target_batch_preds, batch_ideal_rankings = batch_preds, batch_std_labels else: batch_ideal_rankings, batch_ideal_desc_inds = torch.sort(batch_std_labels, dim=1, descending=True) target_batch_preds = torch.gather(batch_preds, dim=1, index=batch_ideal_desc_inds) batch_descending_preds, batch_pred_desc_inds = torch.sort(target_batch_preds, dim=1, descending=True) # sort documents according to the predicted relevance batch_predict_rankings = torch.gather(batch_ideal_rankings, dim=1, index=batch_pred_desc_inds) # reorder batch_stds correspondingly so as to make it consistent. BTW, batch_stds[batch_preds_sorted_inds] only works with 1-D tensor #batch_std_ranks = torch.arange(target_batch_preds.size(1)).type(torch.cuda.FloatTensor) if self.gpu else torch.arange(target_batch_preds.size(1)).type(torch.FloatTensor) batch_std_ranks = torch.arange(target_batch_preds.size(1), dtype=torch.float, device=self.device) dists_1D = 1.0 / torch.log2(batch_std_ranks + 2.0) # discount co-efficients # ideal dcg values based on optimal order batch_idcgs = torch_dcg_at_k(batch_rankings=batch_ideal_rankings, device=self.device) if label_type == LABEL_TYPE.MultiLabel: batch_gains = torch.pow(2.0, batch_predict_rankings) - 1.0 elif label_type == LABEL_TYPE.Permutation: batch_gains = batch_predict_rankings else: raise NotImplementedError batch_n_gains = batch_gains / batch_idcgs # normalised gains if 'NDCG_Loss1' == self.loss_type: power_weights = ndcg_loss1_power_weights(batch_n_gains=batch_n_gains, discounts=dists_1D) elif 'NDCG_Loss2' == self.loss_type: power_weights = ndcg_loss2_power_weights(batch_n_gains=batch_n_gains, discounts=dists_1D) elif 'NDCG_Loss2++' == self.loss_type: power_weights = ndcg_loss2plusplus_power_weights(batch_n_gains=batch_n_gains, discounts=dists_1D, mu=self.mu) batch_pred_diffs = (torch.unsqueeze(batch_descending_preds, dim=2) - torch.unsqueeze(batch_descending_preds, dim=1)).clamp(min=-1e8, max=1e8) # computing pairwise differences, i.e., s_i - s_j batch_pred_diffs[torch.isnan(batch_pred_diffs)] = 0. weighted_probas = (torch.sigmoid(self.sigma * batch_pred_diffs).clamp(min=epsilon) ** power_weights).clamp(min=epsilon) log_weighted_probas = torch.log2(weighted_probas) # mask for truncation based on cutoff k trunc_mask = torch.zeros((target_batch_preds.shape[1], target_batch_preds.shape[1]), dtype=torch.bool, device=self.device) trunc_mask[:self.k, :self.k] = 1 if self.loss_type in ['NDCG_Loss2', 'NDCG_Loss2++']: batch_std_diffs = torch.unsqueeze(batch_predict_rankings, dim=2) - torch.unsqueeze(batch_predict_rankings, dim=1) # standard pairwise differences, i.e., S_{ij} padded_pairs_mask = batch_std_diffs>0 padded_log_weighted_probas = log_weighted_probas [padded_pairs_mask & trunc_mask] else: padded_log_weighted_probas = log_weighted_probas [trunc_mask[None, :, :]] batch_loss = -torch.sum(padded_log_weighted_probas) self.optimizer.zero_grad() batch_loss.backward() self.optimizer.step() return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/lambdaloss.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/lambdaloss.py
MIT
def to_para_string(self, log=False, given_para_dict=None): """ String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return: """ # using specified para-dict or inner para-dict lambdaloss_para_dict = given_para_dict if given_para_dict is not None else self.lambdaloss_para_dict s1, s2 = (':', '\n') if log else ('_', '_') if 'NDCG_Loss2++' == lambdaloss_para_dict['loss_type']: lambdaloss_paras_str = s1.join([lambdaloss_para_dict['loss_type'], 'Sigma', '{:,g}'.format(lambdaloss_para_dict['sigma']), 'Mu', '{:,g}'.format(lambdaloss_para_dict['mu'])]) return lambdaloss_paras_str else: lambdaloss_paras_str = s1.join( [lambdaloss_para_dict['loss_type'], 'Sigma', '{:,g}'.format(lambdaloss_para_dict['sigma'])]) return lambdaloss_paras_str
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return:
to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/lambdaloss.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/lambdaloss.py
MIT
def grid_search(self): """ Iterator of parameter settings for LambdaLoss :param debug: :return: """ if self.use_json: choice_k = self.json_dict['k'] choice_mu = self.json_dict['mu'] choice_sigma = self.json_dict['sigma'] choice_loss_type = self.json_dict['loss_type'] else: choice_loss_type = ['NDCG_Loss2'] if self.debug else ['NDCG_Loss2'] # choice_sigma = [1.0] if self.debug else [1.0] # choice_mu = [5.0] if self.debug else [5.0] # choice_k = [5] if self.debug else [5] for loss_type, sigma, k in product(choice_loss_type, choice_sigma, choice_k): if 'NDCG_Loss2++' == loss_type: for mu in choice_mu: self.lambdaloss_para_dict = dict(model_id='LambdaLoss', sigma=sigma, loss_type=loss_type, mu=mu, k=k) yield self.lambdaloss_para_dict else: self.lambdaloss_para_dict = dict(model_id='LambdaLoss', sigma=sigma, loss_type=loss_type, k=k) yield self.lambdaloss_para_dict
Iterator of parameter settings for LambdaLoss :return:
grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/lambdaloss.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/lambdaloss.py
MIT
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    assert 'label_type' in kwargs and LABEL_TYPE.MultiLabel == kwargs['label_type']
    label_type = kwargs['label_type']
    assert 'presort' in kwargs and kwargs['presort'] is True  # aiming for direct usage of ideal ranking

    # sort documents according to the predicted relevance
    batch_descending_preds, batch_pred_desc_inds = torch.sort(batch_preds, dim=1, descending=True)
    # reorder batch_std_labels correspondingly so as to make them consistent;
    # note that batch_std_labels[batch_pred_desc_inds] only works with a 1-D tensor
    batch_predict_rankings = torch.gather(batch_std_labels, dim=1, index=batch_pred_desc_inds)

    batch_p_ij, batch_std_p_ij = get_pairwise_comp_probs(batch_preds=batch_descending_preds,
                                                         batch_std_labels=batch_predict_rankings,
                                                         sigma=self.sigma)

    batch_delta_ndcg = get_delta_ndcg(batch_ideal_rankings=batch_std_labels,
                                      batch_predict_rankings=batch_predict_rankings,
                                      label_type=label_type, device=self.device)

    _batch_loss = F.binary_cross_entropy(input=torch.triu(batch_p_ij, diagonal=1),
                                         target=torch.triu(batch_std_p_ij, diagonal=1),
                                         weight=torch.triu(batch_delta_ndcg, diagonal=1),
                                         reduction='none')
    batch_loss = torch.sum(_batch_loss)  # equivalent to summing over dims (2, 1) and then over the batch

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/lambdarank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/lambdarank.py
MIT
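The loss above leans on two helpers from ptranking's utilities; the sketch below reconstructs the standard LambdaRank quantities they are expected to produce (textbook formulas, not copies of the library code):

import torch

sigma = 1.0
preds = torch.tensor([[2.0, 1.0, 0.5]])   # scores sorted descending
labels = torch.tensor([[1.0, 2.0, 0.0]])  # labels reordered by predicted order

# predicted pairwise preference, P_ij = sigmoid(sigma * (s_i - s_j))
p_ij = torch.sigmoid(sigma * (preds.unsqueeze(2) - preds.unsqueeze(1)))

# target preference: 1 if y_i > y_j, 0.5 on ties, 0 otherwise
label_diffs = labels.unsqueeze(2) - labels.unsqueeze(1)
std_p_ij = 0.5 * (torch.sign(label_diffs) + 1.0)

print(torch.triu(p_ij, diagonal=1))      # upper triangle, as consumed by the loss
print(torch.triu(std_p_ij, diagonal=1))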
def to_para_string(self, log=False, given_para_dict=None):
    """
    String identifier of parameters
    :param log:
    :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
    :return:
    """
    # using the specified para-dict or the inner para-dict
    lambda_para_dict = given_para_dict if given_para_dict is not None else self.lambda_para_dict

    s1 = ':' if log else '_'
    lambdarank_para_str = s1.join(['Sigma', '{:,g}'.format(lambda_para_dict['sigma'])])
    return lambdarank_para_str
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return:
to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/lambdarank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/lambdarank.py
MIT
def grid_search(self): """ Iterator of parameter settings for LambdaRank """ if self.use_json: choice_sigma = self.json_dict['sigma'] else: choice_sigma = [5.0, 1.0] if self.debug else [1.0] # 1.0, 10.0, 50.0, 100.0 for sigma in choice_sigma: self.lambda_para_dict = dict(model_id=self.model_id, sigma=sigma) yield self.lambda_para_dict
Iterator of parameter settings for LambdaRank
grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/lambdarank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/lambdarank.py
MIT
def forward(ctx, input):
    '''
    In the forward pass we receive a context object and a Tensor containing the input;
    we must return a Tensor containing the output, and we can use the context object to cache objects for use in the backward pass.
    Specifically, ctx is a context object that can be used to stash information for backward computation.
    You can cache arbitrary objects for use in the backward pass using the ctx.save_for_backward method.
    :param ctx:
    :param input: i.e., batch_preds of [batch, ranking_size], each row represents the relevance predictions for documents within a ranking
    :return: [batch, ranking_size], each row represents the log_cumsum_exp value
    '''
    m, _ = torch.max(input, dim=1, keepdim=True)  # a transformation aiming for higher stability when computing softmax() with exp()
    y = input - m
    y = torch.exp(y)
    y_backward_cumsum = torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])  # row-wise cumulative sum, from tail to head
    fd_output = torch.log(y_backward_cumsum) + m  # corresponding to the '-m' operation

    ctx.save_for_backward(input, fd_output)

    return fd_output
In the forward pass we receive a context object and a Tensor containing the input; we must return a Tensor containing the output, and we can use the context object to cache objects for use in the backward pass. Specifically, ctx is a context object that can be used to stash information for backward computation. You can cache arbitrary objects for use in the backward pass using the ctx.save_for_backward method. :param ctx: :param input: i.e., batch_preds of [batch, ranking_size], each row represents the relevance predictions for documents within a ranking :return: [batch, ranking_size], each row represents the log_cumsum_exp value
forward
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/listmle.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/listmle.py
MIT
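A quick sanity check of the forward pass above (assuming PyTorch >= 1.6): it is a tail-to-head log-cumsum-exp, so it should match torch.logcumsumexp applied to the flipped input and flipped back.

import torch

x = torch.randn(2, 5)
m, _ = torch.max(x, dim=1, keepdim=True)
y = torch.exp(x - m)
manual = torch.log(torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])) + m
builtin = torch.flip(torch.logcumsumexp(torch.flip(x, dims=[1]), dim=1), dims=[1])
print(torch.allclose(manual, builtin, atol=1e-6))  # True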
def backward(ctx, grad_output):
    '''
    In the backward pass we receive the context object and a Tensor containing the gradient of the loss
    with respect to the output produced during the forward pass (i.e., forward's output).
    We can retrieve cached data from the context object, and must compute and return the gradient of the loss
    with respect to the input to the forward function.
    Namely, grad_output is the gradient of the loss w.r.t. forward's output. Based on the chain rule,
    propagating grad_output through the Jacobian of the forward computation gives the desired output,
    i.e., the gradient of the loss w.r.t. forward's input.
    :param ctx:
    :param grad_output:
    :return:
    '''
    input, fd_output = ctx.saved_tensors
    # chain rule: since fd_output_j depends on input_i only for i >= j,
    # grad_input_i = exp(input_i) * sum_{j <= i} grad_output_j * exp(-fd_output_j);
    # grad_output must sit inside the cumulative sum -- placing it outside is only
    # correct when grad_output is constant along dim 1 (as it is for a summed loss)
    bk_output = torch.exp(input) * torch.cumsum(grad_output * torch.exp(-fd_output), dim=1)
    return bk_output
In the backward pass we receive the context object and a Tensor containing the gradient of the loss with respect to the output produced during the forward pass (i.e., forward's output). We can retrieve cached data from the context object, and must compute and return the gradient of the loss with respect to the input to the forward function. Namely, grad_output is the gradient of the loss w.r.t. forward's output. Based on the chain rule, propagating grad_output through the Jacobian of the forward computation gives the desired output, i.e., the gradient of the loss w.r.t. forward's input. :param ctx: :param grad_output: :return:
backward
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/listmle.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/listmle.py
MIT
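The corrected backward can be verified against autograd's numerical gradients; the wrapper below just reassembles the two static methods shown above (the class name LogCumsumExp is an assumption).

import torch

class LogCumsumExp(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        m, _ = torch.max(input, dim=1, keepdim=True)
        y = torch.exp(input - m)
        fd_output = torch.log(torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])) + m
        ctx.save_for_backward(input, fd_output)
        return fd_output

    @staticmethod
    def backward(ctx, grad_output):
        input, fd_output = ctx.saved_tensors
        return torch.exp(input) * torch.cumsum(grad_output * torch.exp(-fd_output), dim=1)

x = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(LogCumsumExp.apply, (x,)))  # True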
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    # shuffle per epoch rather than using the same order for a query
    batch_shuffle_ties_inds = arg_shuffle_ties(batch_rankings=batch_std_labels, descending=True, device=self.device)
    batch_preds_shuffled_ties = torch.gather(batch_preds, dim=1, index=batch_shuffle_ties_inds)

    # Option 1: the self-defined op, which was needed before torch.flip() became available
    '''
    batch_logcumsumexps = apply_LogCumsumExp(target_batch_preds)
    batch_loss = torch.sum(batch_logcumsumexps - target_batch_preds)
    '''

    # Option 2: with torch.flip() now available, the loss can be computed directly without defining a new op
    m, _ = torch.max(batch_preds_shuffled_ties, dim=1, keepdim=True)  # a transformation aiming for higher stability when computing softmax() with exp()
    y = batch_preds_shuffled_ties - m
    y = torch.exp(y)
    y_backward_cumsum = torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])  # row-wise cumulative sum, from tail to head
    batch_logcumsumexps = torch.log(y_backward_cumsum) + m  # corresponding to the '-m' operation
    batch_loss = torch.sum(torch.sum((batch_logcumsumexps - batch_preds_shuffled_ties), dim=1))

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/listmle.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/listmle.py
MIT
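A numerical sketch showing that the summed (logcumsumexp - score) terms above equal the negative log Plackett-Luce likelihood of the given ordering:

import torch

scores = torch.tensor([[1.5, 0.7, -0.2]])  # already in ground-truth order
lcse = torch.flip(torch.logcumsumexp(torch.flip(scores, dims=[1]), dim=1), dims=[1])
loss = torch.sum(lcse - scores)

# Plackett-Luce: log P(pi) = sum_i [s_i - logsumexp(s_i, ..., s_n)]
log_pl = sum(scores[0, i] - torch.logsumexp(scores[0, i:], dim=0) for i in range(3))
print(torch.allclose(loss, -log_pl))  # True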
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    The Top-1 approximated ListNet loss, which reduces to a softmax and simple cross entropy.
    @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    '''
    # deprecated two-step way:
    batch_top1_pros_pred = F.softmax(batch_preds, dim=1)
    batch_top1_pros_std = F.softmax(batch_std_labels, dim=1)
    batch_loss = torch.sum(-torch.sum(batch_top1_pros_std * torch.log(batch_top1_pros_pred), dim=1))
    '''
    # note: computing log(softmax(x)) as two separate operations is slower and numerically unstable,
    # c.f. https://pytorch.org/docs/stable/_modules/torch/nn/functional.html
    batch_loss = torch.sum(-torch.sum(F.softmax(batch_std_labels, dim=1) * F.log_softmax(batch_preds, dim=1), dim=1))

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    return batch_loss
The Top-1 approximated ListNet loss, which reduces to a softmax and simple cross entropy. @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/listnet.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/listnet.py
MIT
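A small sketch comparing the fused form used above with the deprecated two-step version; both agree on well-scaled inputs, but F.log_softmax avoids log(0) when predictions are extreme:

import torch
import torch.nn.functional as F

preds = torch.tensor([[3.0, 1.0, -2.0]])
labels = torch.tensor([[2.0, 1.0, 0.0]])

fused = torch.sum(-torch.sum(F.softmax(labels, dim=1) * F.log_softmax(preds, dim=1), dim=1))
twostep = torch.sum(-torch.sum(F.softmax(labels, dim=1) * torch.log(F.softmax(preds, dim=1)), dim=1))
print(torch.allclose(fused, twostep))  # True here; the two-step form breaks down for large-magnitude preds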
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    # aiming for meaningful batch-normalization, please set {train_rough_batch_size, validation_rough_batch_size, test_rough_batch_size = 1, 1, 1}
    assert 1 == batch_preds.size(0)
    assert 'presort' in kwargs and kwargs['presort'] is True  # aiming for direct usage of ideal ranking

    if 'PL' == self.distribution:
        batch_sample_inds, batch_action_preds = sample_ranking_PL(batch_preds=batch_preds, only_indices=False,
                                                                  temperature=self.temperature)
    elif 'STPL' == self.distribution:
        batch_sample_inds, batch_action_preds = sample_ranking_PL_gumbel_softmax(
            batch_preds=batch_preds, only_indices=False, temperature=self.temperature, device=self.device)
    else:
        raise NotImplementedError

    top_k = batch_std_labels.size(1) if self.top_k is None else self.top_k
    batch_action_stds = torch.gather(batch_std_labels, dim=1, index=batch_sample_inds)

    if self.pg_checking:
        sample_metric_values = torch_ndcg_at_k(batch_predict_rankings=batch_action_stds,
                                               batch_ideal_rankings=batch_std_labels, k=5, device=self.device)

    # TODO alternative metrics, such as AP and NERR
    batch_gains = torch.pow(2.0, batch_action_stds) - 1.0
    batch_ranks = torch.arange(top_k, dtype=torch.float, device=self.device).view(1, -1)
    batch_discounts = torch.log2(2.0 + batch_ranks)
    batch_rewards = batch_gains[:, 0:top_k] / batch_discounts

    ''' this is also the key difference: the long-term return of the sampled episode starting from t,
    which is equivalent to a different weighting of the per-position terms '''
    batch_G_t = torch.flip(torch.cumsum(torch.flip(batch_rewards, dims=[1]), dim=1), dims=[1])
    if self.gamma != 1.0:
        # create the discounts on self.device so the product below stays on one device
        return_discounts = torch.cumprod(torch.full((1, top_k), self.gamma, device=self.device), dim=1)
        batch_G_t = batch_G_t * return_discounts

    m, _ = torch.max(batch_action_preds, dim=1, keepdim=True)  # a transformation aiming for higher stability when computing softmax() with exp()
    y = batch_action_preds - m
    y = torch.exp(y)
    y_cumsum_t2h = torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])  # row-wise cumulative sum, from tail to head
    batch_logcumsumexps = torch.log(y_cumsum_t2h) + m  # corresponding to the '-m' operation

    batch_neg_log_probs = batch_logcumsumexps[:, 0:top_k] - batch_action_preds[:, 0:top_k]
    batch_loss = torch.sum(torch.sum(batch_neg_log_probs * batch_G_t[:, 0:top_k], dim=1))

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    if self.pg_checking:
        return sample_metric_values
    else:
        return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/mdprank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/mdprank.py
MIT
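A sketch of the return-to-go computation that distinguishes the loss above: the flipped double cumsum turns per-position DCG-style rewards into G_t, the reward accumulated from position t onward.

import torch

rewards = torch.tensor([[3.0, 1.0, 0.5, 0.25]])
G_t = torch.flip(torch.cumsum(torch.flip(rewards, dims=[1]), dim=1), dims=[1])
print(G_t)  # tensor([[4.7500, 1.7500, 0.7500, 0.2500]])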
def to_para_string(self, log=False, given_para_dict=None):
    """
    String identifier of parameters
    :param log:
    :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
    """
    # using the specified para-dict or the inner para-dict
    MDPRank_para_dict = given_para_dict if given_para_dict is not None else self.MDPRank_para_dict

    s1 = ':' if log else '_'
    top_k, distribution, gamma, temperature = MDPRank_para_dict['top_k'], MDPRank_para_dict['distribution'], \
                                              MDPRank_para_dict['gamma'], MDPRank_para_dict['temperature']

    fastMDPRank_para_str = s1.join([str(top_k), distribution, 'G', '{:,g}'.format(gamma),
                                    'T', '{:,g}'.format(temperature)])
    return fastMDPRank_para_str
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/mdprank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/mdprank.py
MIT
def grid_search(self):
    """
    Iterator of parameter settings for FastMDPRank
    """
    if self.use_json:
        choice_topk = self.json_dict['top_k']
        choice_distribution = self.json_dict['distribution']
        choice_temperature = self.json_dict['temperature']
        choice_gamma = self.json_dict['gamma']
    else:
        choice_topk = [10]
        choice_distribution = ['PL']
        choice_temperature = [.1] if self.debug else [1.0]  # 1.0, 10.0
        choice_gamma = [1.0]

    for top_k, distribution, temperature, gamma in product(choice_topk, choice_distribution,
                                                           choice_temperature, choice_gamma):
        self.MDPRank_para_dict = dict(model_id=self.model_id, top_k=top_k, gamma=gamma,
                                      distribution=distribution, temperature=temperature)
        yield self.MDPRank_para_dict
Iterator of parameter settings for FastMDPRank
grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/mdprank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/mdprank.py
MIT
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    # cos is the module-level nn.CosineSimilarity(dim=1) instance;
    # (1 - cos) / 0.5 maps the per-query cosine distance into [0, 4]
    batch_loss = torch.sum((1.0 - cos(batch_preds, batch_std_labels)) / 0.5)

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/rank_cosine.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/rank_cosine.py
MIT
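A usage sketch of the query-level cosine loss above, assuming the module-level cos = nn.CosineSimilarity(dim=1) instance the method refers to:

import torch
import torch.nn as nn

cos = nn.CosineSimilarity(dim=1)
batch_preds = torch.tensor([[2.0, 1.0, 0.0]])
batch_std_labels = torch.tensor([[2.0, 1.0, 0.0]])
print((1.0 - cos(batch_preds, batch_std_labels)) / 0.5)  # tensor([0.]) for a perfect match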
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    @param batch_preds: [batch, ranking_size] each row represents the mean predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    assert 'presort' in kwargs and kwargs['presort'] is True  # aiming for direct usage of ideal ranking
    assert 'nDCG' == self.metric  # TODO support more metrics
    assert LABEL_TYPE.MultiLabel == kwargs['label_type']  # other types are not considered yet
    label_type = kwargs['label_type']

    batch_mus = batch_preds

    ''' expected ranks '''
    # pairwise mean difference, i.e., f_ij
    batch_pairsub_mus = torch.unsqueeze(batch_mus, dim=2) - torch.unsqueeze(batch_mus, dim=1)
    # variance w.r.t. s_i - s_j, which is equal to sigma^2_i + sigma^2_j
    pairsub_vars = 2 * self.delta ** 2
    # \Phi(0), i.e., the probability that s_i - s_j < 0; pairsub_vars is a Python float,
    # so the square root is taken with ** 0.5 rather than torch.sqrt()
    batch_Phi0 = 0.5 * torch.erfc(batch_pairsub_mus / (2 * pairsub_vars) ** 0.5)
    # remove diagonal entries
    batch_Phi0_subdiag = torch.triu(batch_Phi0, diagonal=1) + torch.tril(batch_Phi0, diagonal=-1)
    batch_expt_ranks = torch.sum(batch_Phi0_subdiag, dim=2) + 1.0

    batch_gains = torch.pow(2.0, batch_std_labels) - 1.0
    batch_dists = 1.0 / torch.log2(batch_expt_ranks + 1.0)  # discount coefficients
    batch_idcgs = torch_dcg_at_k(batch_rankings=batch_std_labels, label_type=label_type, device=self.device)

    # TODO check the effect of removing batch_idcgs
    if self.top_k is None:
        batch_dcgs = batch_dists * batch_gains
        batch_expt_nDCG = torch.sum(batch_dcgs / batch_idcgs, dim=1)
        batch_loss = -torch.sum(batch_expt_nDCG)
    else:
        k = min(self.top_k, batch_std_labels.size(1))
        batch_dcgs = batch_dists[:, 0:k] * batch_gains[:, 0:k]
        batch_expt_nDCG_k = torch.sum(batch_dcgs / batch_idcgs, dim=1)
        batch_loss = -torch.sum(batch_expt_nDCG_k)

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    return batch_loss
@param batch_preds: [batch, ranking_size] each row represents the mean predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/softrank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/softrank.py
MIT
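A sketch of the expected-rank step above: under Gaussian score distributions, Phi(0) of each pairwise mean difference is the probability that document j outscores document i, and summing the off-diagonal entries (plus one) yields smoothed ranks.

import torch

delta = 1.0
mus = torch.tensor([[2.0, 0.0, -2.0]])
pairsub_mus = mus.unsqueeze(2) - mus.unsqueeze(1)
pairsub_vars = 2 * delta ** 2
Phi0 = 0.5 * torch.erfc(pairsub_mus / (2 * pairsub_vars) ** 0.5)
Phi0_subdiag = torch.triu(Phi0, diagonal=1) + torch.tril(Phi0, diagonal=-1)
expt_ranks = torch.sum(Phi0_subdiag, dim=2) + 1.0
print(expt_ranks)  # approximately [[1.08, 2.00, 2.92]], approaching [1, 2, 3] as delta shrinks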
def to_para_string(self, log=False, given_para_dict=None):
    """
    String identifier of parameters
    :param log:
    :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
    :return:
    """
    # using the specified para-dict or the inner para-dict
    soft_para_dict = given_para_dict if given_para_dict is not None else self.soft_para_dict

    s1 = ':' if log else '_'
    metric, delta, top_k = soft_para_dict['metric'], soft_para_dict['delta'], soft_para_dict['top_k']
    if top_k is not None:
        softrank_para_str = s1.join([metric, str(top_k), 'Delta', '{:,g}'.format(delta)])
    else:
        softrank_para_str = s1.join([metric, 'Delta', '{:,g}'.format(delta)])
    return softrank_para_str
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return:
to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/softrank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/softrank.py
MIT
def grid_search(self):
    """
    Iterator of parameter settings for SoftRank
    """
    if self.use_json:
        choice_topk = self.json_dict['top_k']
        choice_delta = self.json_dict['delta']
        choice_metric = self.json_dict['metric']
    else:
        choice_delta = [5.0, 1.0] if self.debug else [1.0]  # 1.0, 10.0, 50.0, 100.0
        choice_metric = ['nDCG']
        choice_topk = [None]

    for delta, top_k, metric in product(choice_delta, choice_topk, choice_metric):
        self.soft_para_dict = dict(model_id=self.model_id, delta=delta, top_k=top_k, metric=metric)
        yield self.soft_para_dict
Iterator of parameter settings for SoftRank
grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/softrank.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/softrank.py
MIT
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
    '''
    The Top-1 approximated ListNet loss, which reduces to a softmax and simple cross entropy.
    @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
    @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
    @param kwargs:
    @return:
    '''
    unif = torch.rand(batch_preds.size(), device=self.device)  # [batch_size, ranking_size]
    gumbel = -torch.log(-torch.log(unif + EPS) + EPS)  # sample standard Gumbel noise
    batch_preds = (batch_preds + gumbel) / self.temperature

    # note: computing log(softmax(x)) as two separate operations is slower and numerically unstable,
    # c.f. https://pytorch.org/docs/stable/_modules/torch/nn/functional.html
    batch_loss = torch.sum(-torch.sum(F.softmax(batch_std_labels, dim=1) * F.log_softmax(batch_preds, dim=1), dim=1))

    self.optimizer.zero_grad()
    batch_loss.backward()
    self.optimizer.step()

    return batch_loss
The Top-1 approximated ListNet loss, which reduces to a softmax and simple cross entropy. @param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query @param kwargs: @return:
custom_loss_function
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/st_listnet.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/st_listnet.py
MIT
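A sketch of the Gumbel perturbation used above: -log(-log(U)) samples standard Gumbel noise, so adding it to the scores and dividing by the temperature yields a smoothed, differentiable randomization of the ranking scores (EPS is assumed to be a tiny module-level constant).

import torch

EPS = 1e-20  # assumed tiny module-level constant
temperature = 1.0
batch_preds = torch.tensor([[2.0, 1.0, 0.0]])
unif = torch.rand(batch_preds.size())
gumbel = -torch.log(-torch.log(unif + EPS) + EPS)
print((batch_preds + gumbel) / temperature)  # perturbed scores; the induced ranking varies across draws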
def to_para_string(self, log=False, given_para_dict=None): """ String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return: """ # using specified para-dict or inner para-dict stlistnet_para_dict = given_para_dict if given_para_dict is not None else self.stlistnet_para_dict s1 = ':' if log else '_' stlistnet_para_str = s1.join(['Tem', str(stlistnet_para_dict['temperature'])]) return stlistnet_para_str
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return:
to_para_string
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/st_listnet.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/st_listnet.py
MIT
def grid_search(self):
    """
    Iterator of parameter settings for STListNet
    :return:
    """
    if self.use_json:
        choice_temperature = self.json_dict['temperature']
    else:
        choice_temperature = [1.0]  # e.g., 1.0, 10.0, 50.0, 100.0

    for temperature in choice_temperature:
        self.stlistnet_para_dict = dict(model_id=self.model_id, temperature=temperature)
        yield self.stlistnet_para_dict
Iterator of parameter settings for STListNet :return:
grid_search
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/st_listnet.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/st_listnet.py
MIT
def __init__(self, eps, max_iter, reduction='mean'): """ eps (float): regularization coefficient max_iter (int): maximum number of Sinkhorn iterations """ super(EntropicOT, self).__init__() self.eps = eps self.max_iter = max_iter self.reduction = reduction
eps (float): regularization coefficient max_iter (int): maximum number of Sinkhorn iterations
__init__
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/pytorch_wasserstein.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/pytorch_wasserstein.py
MIT
def forward(ctx, pred, target, cost, lam, sh_num_iter):
    """
    pred: Batch * K: K = # mass points
    target: Batch * L: L = # mass points
    """
    na, nb = cost.size(0), cost.size(1)
    assert pred.size(1) == na and target.size(1) == nb

    K = torch.exp(-cost / lam)
    KM = cost * K
    batch_size = pred.size(0)

    log_a, log_b = torch.log(pred), torch.log(target)
    log_u = cost.new(batch_size, na).fill_(-np.log(na))
    log_v = cost.new(batch_size, nb).fill_(-np.log(nb))

    # Sinkhorn iterations in the log domain; the max-subtraction keeps the exponentials
    # stable (earlier expand_as()-based variants were error-prone and are replaced by
    # keepdim=True broadcasting throughout)
    for i in range(sh_num_iter):
        log_u_max = torch.max(log_u, dim=1, keepdim=True)[0]
        u_stab = torch.exp(log_u - log_u_max)
        log_v = log_b - torch.log(torch.mm(K.t(), u_stab.t()).t()) - log_u_max
        log_v_max = torch.max(log_v, dim=1, keepdim=True)[0]
        v_stab = torch.exp(log_v - log_v_max)
        log_u = log_a - torch.log(torch.mm(K, v_stab.t()).t()) - log_v_max

    log_v_max = torch.max(log_v, dim=1, keepdim=True)[0]
    v_stab = torch.exp(log_v - log_v_max)
    logcostpart1 = torch.log(torch.mm(KM, v_stab.t()).t()) + log_v_max
    wnorm = torch.exp(log_u + logcostpart1).mean(0).sum()  # sum(1) for a per-item-pair loss

    grad = log_u * lam
    # center the gradient (applied twice, following the original implementation)
    grad = grad - torch.mean(grad, dim=1, keepdim=True)
    grad = grad - torch.mean(grad, dim=1, keepdim=True)
    grad = grad / batch_size

    ctx.save_for_backward(grad)
    return cost.new((wnorm,))
pred: Batch * K: K = # mass points target: Batch * L: L = # mass points
forward
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/pytorch_wasserstein.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/pytorch_wasserstein.py
MIT
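The max-subtraction dance inside the Sinkhorn loop above is a hand-rolled log-sum-exp; a sketch showing that the u-update collapses to torch.logsumexp:

import torch

batch, na, nb, lam = 2, 4, 5, 0.1
cost = torch.rand(na, nb)
K = torch.exp(-cost / lam)
log_v = torch.randn(batch, nb)
log_a = torch.log(torch.softmax(torch.randn(batch, na), dim=1))

log_v_max = torch.max(log_v, dim=1, keepdim=True)[0]
v_stab = torch.exp(log_v - log_v_max)
manual = log_a - torch.log(torch.mm(K, v_stab.t()).t()) - log_v_max
direct = log_a - torch.logsumexp(log_v.unsqueeze(1) - cost.unsqueeze(0) / lam, dim=2)
print(torch.allclose(manual, direct, atol=1e-5))  # True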
def _cost_mat_group(cpu_tor_batch_std_label_vec, non_rele_gap=100.0, var_penalty=0.01, gain_base=4.0):
    """
    Numpy reference. Take into account the group information among documents, namely whether two documents are of the same standard relevance degree
    @param non_rele_gap the gap between a relevant document and an irrelevant document
    @param var_penalty variance penalty
    @param gain_base the base for computing gain value
    """
    size_ranking = cpu_tor_batch_std_label_vec.size(1)
    std_label_vec = cpu_tor_batch_std_label_vec[0, :].numpy()

    cost_mat = np.zeros(shape=(size_ranking, size_ranking), dtype=np.float32)
    for i in range(size_ranking):
        i_rele_level = std_label_vec[i]
        for j in range(size_ranking):
            if i == j:
                cost_mat[i, j] = 0
            else:
                j_rele_level = std_label_vec[j]
                if i_rele_level == j_rele_level:
                    cost_mat[i, j] = var_penalty
                else:
                    cost_mat[i, j] = np.abs(rele_gain(i_rele_level, gain_base=gain_base)
                                            - rele_gain(j_rele_level, gain_base=gain_base))
                    if 0 == i_rele_level or 0 == j_rele_level:  # enforce the margin between relevance and non-relevance
                        cost_mat[i, j] += non_rele_gap

    return cost_mat
Numpy reference. Take into account the group information among documents, namely whether two documents are of the same standard relevance degree @param non_rele_gap the gap between a relevant document and an irrelevant document @param var_penalty variance penalty @param gain_base the base for computing gain value
_cost_mat_group
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
MIT
def torch_cost_mat_dist(batch_std_labels, exponent=1.0, gpu=False):
    """
    Viewing the absolute difference (with an exponent value) between two rank positions as the cost
    """
    batch_size = batch_std_labels.size(0)
    ranking_size = batch_std_labels.size(1)

    positions = torch.arange(ranking_size, dtype=torch.float, device='cuda' if gpu else 'cpu') + 1.0
    C = torch.abs(positions.view(-1, 1) - positions.view(1, -1))

    batch_C = C.expand(batch_size, -1, -1)
    if exponent > 1.0:
        batch_C = torch.pow(batch_C, exponent)

    return batch_C
Viewing the absolute difference (with an exponent value) between two rank positions as the cost
torch_cost_mat_dist
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
MIT
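A tiny example of the position-distance cost above: for a ranking of size 3, the p1 cost is simply |i - j| over rank positions.

import torch

positions = torch.arange(3, dtype=torch.float) + 1.0
C = torch.abs(positions.view(-1, 1) - positions.view(1, -1))
print(C)
# tensor([[0., 1., 2.],
#         [1., 0., 1.],
#         [2., 1., 0.]])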
def get_delta_gains(batch_stds, discount=False, gpu=False):
    '''
    Delta-gains w.r.t. pairwise swapping of the ideal ranking
    :param batch_stds: the standard labels sorted in a descending order
    :return:
    '''
    batch_gains = torch.pow(2.0, batch_stds) - 1.0
    batch_g_diffs = torch.unsqueeze(batch_gains, dim=2) - torch.unsqueeze(batch_gains, dim=1)

    if discount:
        batch_std_ranks = torch.arange(batch_stds.size(1), dtype=torch.float, device='cuda' if gpu else 'cpu')
        batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)  # discount coefficients
        batch_dists = torch.unsqueeze(batch_dists, dim=0)
        batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
        batch_delta_gs = torch.abs(batch_g_diffs) * torch.abs(batch_dists_diffs)  # absolute changes w.r.t. pairwise swapping
    else:
        batch_delta_gs = torch.abs(batch_g_diffs)  # absolute delta gains w.r.t. pairwise swapping

    return batch_delta_gs
Delta-gains w.r.t. pairwise swapping of the ideal ranking :param batch_stds: the standard labels sorted in a descending order :return:
get_delta_gains
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
MIT
def torch_cost_mat_group(batch_std_labels, non_rele_gap=100.0, var_penalty=0.01, gain_base=2.0, gpu=False):
    """
    Take into account the group information among documents, namely whether two documents are of the same standard relevance degree
    :param batch_std_labels: standard relevance labels
    :param non_rele_gap: the gap between a relevant document and an irrelevant document
    :param var_penalty: variance penalty w.r.t. the transportation among documents of the same label
    :param gain_base: the base for computing gain value
    :return: cost matrices
    """
    device = 'cuda' if gpu else 'cpu'
    batch_size = batch_std_labels.size(0)
    ranking_size = batch_std_labels.size(1)

    batch_std_gains = torch.pow(gain_base, batch_std_labels) - 1.0
    # add the gap between relevance and non-relevance
    torch_non_rele_gap = torch.tensor([non_rele_gap], dtype=torch.float, device=device)
    batch_std_gains_gaps = torch.where(batch_std_gains < 1.0, -torch_non_rele_gap, batch_std_gains)

    batch_std_costs = batch_std_gains_gaps.view(batch_size, ranking_size, 1) \
                      - batch_std_gains_gaps.view(batch_size, 1, ranking_size)
    batch_std_costs = torch.abs(batch_std_costs)  # symmetric cost, i.e., C_{ij} = C_{ji}

    # add the variance penalty, i.e., the cost of transport among positions of the same
    # relevance level; the diagonal entries are revised back to zero below
    torch_var_penalty = torch.tensor([var_penalty], dtype=torch.float, device=device)
    batch_C = torch.where(batch_std_costs < 1.0, torch_var_penalty, batch_std_costs)

    diag = torch.eye(ranking_size, dtype=torch.float, device=device) * var_penalty
    batch_diags = diag.expand(batch_size, -1, -1)
    batch_C = batch_C - batch_diags  # revise the diagonal entries back to zero

    return batch_C
Take into account the group information among documents, namely whether two documents are of the same standard relevance degree :param batch_std_labels: standard relevance labels :param non_rele_gap: the gap between a relevant document and an irrelevant document :param var_penalty: variance penalty w.r.t. the transportation among documents of the same label :param gain_base: the base for computing gain value :return: cost matrices
torch_cost_mat_group
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
MIT
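A small worked example of the grouped cost above (assuming the function is in scope): with labels [2, 1, 0] and gain_base 2, the gains are [3, 1, 0], the non-relevant entry is shifted to -100, and the diagonal is restored to zero.

import torch

labels = torch.tensor([[2.0, 1.0, 0.0]])
print(torch_cost_mat_group(labels, non_rele_gap=100.0, var_penalty=0.01, gain_base=2.0)[0])
# tensor([[  0.,   2., 103.],
#         [  2.,   0., 101.],
#         [103., 101.,   0.]])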
def get_explicit_cost_mat(batch_std_labels, wass_para_dict=None, gpu=False): """ Initialize the cost matrix based on pre-defined (prior) knowledge :param batch_std_labels: :param wass_para_dict: :return: """ cost_type = wass_para_dict['cost_type'] if cost_type == 'p1': # |x-y| batch_C = torch_cost_mat_dist(batch_std_labels, gpu=gpu) elif cost_type == 'p2': # |x-y|^2 batch_C = torch_cost_mat_dist(batch_std_labels, exponent=2.0, gpu=gpu) elif cost_type == 'eg': # explicit grouping of relevance labels gain_base, non_rele_gap, var_penalty = wass_para_dict['gain_base'], wass_para_dict['non_rele_gap'], wass_para_dict['var_penalty'] batch_C = torch_cost_mat_group(batch_std_labels, non_rele_gap=non_rele_gap, var_penalty=var_penalty, gain_base=gain_base, gpu=gpu) elif cost_type == 'dg': # delta gain batch_C = get_delta_gains(batch_std_labels, gpu=gpu) elif cost_type == 'ddg': # delta discounted gain batch_C = get_delta_gains(batch_std_labels, discount=True, gpu=gpu) else: raise NotImplementedError return batch_C
Initialize the cost matrix based on pre-defined (prior) knowledge :param batch_std_labels: :param wass_para_dict: :return:
get_explicit_cost_mat
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
MIT
def get_standard_normalized_histogram_ST(batch_std_labels, non_rele_as=0.0, adjust_softmax=True):
    """
    Convert to a normalized histogram based on softmax
    The underlying trick is to down-weight the mass of non-relevant labels: treat them as sharing a single
    unit of exponentiated mass, which is then split evenly among them
    """
    if adjust_softmax:
        batch_ones = torch.ones_like(batch_std_labels)
        batch_zeros = torch.zeros_like(batch_std_labels)
        batch_non_rele_ones = torch.where(batch_std_labels > 0.0, batch_zeros, batch_ones)
        # keepdim enables row-wise broadcasting; clamp avoids a 0/0 for queries without any non-relevant document
        batch_non_cnts = torch.sum(batch_non_rele_ones, dim=1, keepdim=True).clamp(min=1.0)

        batch_std_exps = torch.exp(batch_std_labels)
        if non_rele_as != 0.0:
            batch_rele_ones = 1.0 - batch_non_rele_ones
            batch_non_vals = batch_non_rele_ones * non_rele_as
            batch_non_avgs = (torch.exp(batch_non_vals) - batch_rele_ones) / batch_non_cnts
        else:
            batch_non_avgs = batch_non_rele_ones / batch_non_cnts

        batch_std_adjs = batch_std_exps - batch_non_rele_ones + batch_non_avgs
        # normalize per query rather than over the whole batch
        batch_histograms = batch_std_adjs / torch.sum(batch_std_adjs, dim=1, keepdim=True)
    else:
        batch_histograms = F.softmax(batch_std_labels, dim=1)

    return batch_histograms
Convert to a normalized histogram based on softmax The underlying trick is to down-weight the mass of non-relevant labels: treat them as sharing a single unit of exponentiated mass, which is then split evenly among them
get_standard_normalized_histogram_ST
python
wildltr/ptranking
ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py
MIT
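A sketch of the softmax adjustment above on a single query: the three zero labels share one unit of exponentiated mass instead of e^0 each, shrinking the probability assigned to non-relevant documents relative to a plain softmax.

import torch
import torch.nn.functional as F

labels = torch.tensor([[2.0, 1.0, 0.0, 0.0, 0.0]])
plain = F.softmax(labels, dim=1)

exps = torch.exp(labels)
non_rele = (labels <= 0.0).float()
adjusted = exps - non_rele + non_rele / non_rele.sum(dim=1, keepdim=True)
hist = adjusted / adjusted.sum(dim=1, keepdim=True)

print(plain)  # non-relevant entries get ~0.076 each
print(hist)   # non-relevant entries get ~0.030 each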