Columns: code, docstring, func_name, language, repo, path, url, license
def cal_metric_at_ks(self, model_id, all_std_labels=None, all_preds=None, group=None, ks=[1, 3, 5, 10], label_type=None):
    """
    Compute metric values with different cutoff values
    :param model_id:
    :param all_std_labels:
    :param all_preds:
    :param group:
    :param ks:
    :return:
    """
    cnt = torch.zeros(1)
    sum_ndcg_at_ks = torch.zeros(len(ks))
    sum_nerr_at_ks = torch.zeros(len(ks))
    sum_ap_at_ks = torch.zeros(len(ks))
    sum_p_at_ks = torch.zeros(len(ks))
    list_ndcg_at_ks_per_q = []
    list_err_at_ks_per_q = []
    list_ap_at_ks_per_q = []
    list_p_at_ks_per_q = []

    tor_all_std_labels, tor_all_preds = \
        torch.from_numpy(all_std_labels.astype(np.float32)), torch.from_numpy(all_preds.astype(np.float32))

    head = 0
    if model_id.startswith('LightGBM'):
        group = group.astype(int).tolist()  # np.int is removed in recent NumPy; the builtin int is equivalent here

    for gr in group:
        tor_per_query_std_labels = tor_all_std_labels[head:head + gr]
        tor_per_query_preds = tor_all_preds[head:head + gr]
        head += gr

        _, tor_sorted_inds = torch.sort(tor_per_query_preds, descending=True)
        batch_predict_rankings = tor_per_query_std_labels[tor_sorted_inds]
        batch_ideal_rankings, _ = torch.sort(tor_per_query_std_labels, descending=True)

        ndcg_at_ks = torch_ndcg_at_ks(batch_predict_rankings=batch_predict_rankings.view(1, -1),
                                      batch_ideal_rankings=batch_ideal_rankings.view(1, -1),
                                      ks=ks, label_type=label_type)
        ndcg_at_ks = torch.squeeze(ndcg_at_ks, dim=0)
        list_ndcg_at_ks_per_q.append(ndcg_at_ks.numpy())

        nerr_at_ks = torch_nerr_at_ks(batch_predict_rankings=batch_predict_rankings.view(1, -1),
                                      batch_ideal_rankings=batch_ideal_rankings.view(1, -1),
                                      ks=ks, label_type=label_type)
        nerr_at_ks = torch.squeeze(nerr_at_ks, dim=0)
        list_err_at_ks_per_q.append(nerr_at_ks.numpy())

        ap_at_ks = torch_ap_at_ks(batch_predict_rankings=batch_predict_rankings.view(1, -1),
                                  batch_ideal_rankings=batch_ideal_rankings.view(1, -1), ks=ks)
        ap_at_ks = torch.squeeze(ap_at_ks, dim=0)
        list_ap_at_ks_per_q.append(ap_at_ks.numpy())

        p_at_ks = torch_precision_at_ks(batch_predict_rankings=batch_predict_rankings.view(1, -1), ks=ks)
        p_at_ks = torch.squeeze(p_at_ks, dim=0)
        list_p_at_ks_per_q.append(p_at_ks.numpy())

        sum_ndcg_at_ks = torch.add(sum_ndcg_at_ks, ndcg_at_ks)
        sum_nerr_at_ks = torch.add(sum_nerr_at_ks, nerr_at_ks)
        sum_ap_at_ks = torch.add(sum_ap_at_ks, ap_at_ks)
        sum_p_at_ks = torch.add(sum_p_at_ks, p_at_ks)
        cnt += 1

    tor_avg_ndcg_at_ks = sum_ndcg_at_ks / cnt
    avg_ndcg_at_ks = tor_avg_ndcg_at_ks.data.numpy()

    tor_avg_nerr_at_ks = sum_nerr_at_ks / cnt
    avg_nerr_at_ks = tor_avg_nerr_at_ks.data.numpy()

    tor_avg_ap_at_ks = sum_ap_at_ks / cnt
    avg_ap_at_ks = tor_avg_ap_at_ks.data.numpy()

    tor_avg_p_at_ks = sum_p_at_ks / cnt
    avg_p_at_ks = tor_avg_p_at_ks.data.numpy()

    return avg_ndcg_at_ks, avg_nerr_at_ks, avg_ap_at_ks, avg_p_at_ks,\
           list_ndcg_at_ks_per_q, list_err_at_ks_per_q, list_ap_at_ks_per_q, list_p_at_ks_per_q
Compute metric values with different cutoff values :param model_id: :param all_std_labels: :param all_preds: :param group: :param ks: :return:
cal_metric_at_ks
python
wildltr/ptranking
ptranking/ltr_tree/eval/ltr_tree.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py
MIT
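The core of cal_metric_at_ks above is re-expressing each query as a pair of label rankings: the labels reordered by the predicted scores versus the labels in ideal order. A minimal standalone illustration of that step, with toy tensors that are not from the source:

import torch

preds = torch.tensor([0.9, 0.4, 0.1])   # predicted scores for one query
labels = torch.tensor([1.0, 2.0, 0.0])  # graded relevance labels

_, inds = torch.sort(preds, descending=True)
predict_ranking = labels[inds]                          # tensor([1., 2., 0.])
ideal_ranking, _ = torch.sort(labels, descending=True)  # tensor([2., 1., 0.])
# .view(1, -1) then turns each vector into the 1 x n batch expected by the *_at_ks metrics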
def setup_eval(self, data_dict, eval_dict):
    """
    Perform some checks, and revise some settings due to the debug mode
    :param data_dict:
    :param eval_dict:
    :return:
    """
    # required setting to be consistent with the dataset
    if data_dict['data_id'] == 'Istella':
        assert eval_dict['do_validation'] is not True  # since there is no validation data

    self.output_root = self.setup_output(data_dict=data_dict, eval_dict=eval_dict)
    if not os.path.exists(self.output_root):
        os.makedirs(self.output_root)
    self.save_model_dir = self.output_root

    if eval_dict['do_log']:
        sys.stdout = open(self.output_root + 'log.txt', "w")
Perform some checks, and revise some settings due to the debug mode :param data_dict: :param eval_dict: :return:
setup_eval
python
wildltr/ptranking
ptranking/ltr_tree/eval/ltr_tree.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py
MIT
def update_save_model_dir(self, data_dict=None, fold_k=None):
    """
    Update the directory for saving model file when there are multiple folds
    :param data_dict:
    :param fold_k:
    :return:
    """
    if data_dict['data_id'] in MSLETOR or data_dict['data_id'] in MSLRWEB:
        self.save_model_dir = self.output_root + '-'.join(['Fold', str(fold_k)]) + '/'
        if not os.path.exists(self.save_model_dir):
            os.makedirs(self.save_model_dir)
Update the directory for saving model file when there are multiple folds :param data_dict: :param fold_k: :return:
update_save_model_dir
python
wildltr/ptranking
ptranking/ltr_tree/eval/ltr_tree.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py
MIT
def kfold_cv_eval(self, data_dict=None, eval_dict=None, model_para_dict=None):
    """
    Evaluation based on k-fold cross validation if multiple folds exist
    :param data_dict:
    :param eval_dict:
    :param model_para_dict:
    :return:
    """
    self.display_information(data_dict=data_dict)
    self.setup_eval(data_dict=data_dict, eval_dict=eval_dict)

    model_id, data_id = self.model_parameter.model_id, data_dict['data_id']
    fold_num = data_dict['fold_num']  # updated due to the debug mode
    cutoffs, do_validation = eval_dict['cutoffs'], eval_dict['do_validation']

    tree_ranker = globals()[model_id](model_para_dict)

    time_begin = datetime.datetime.now()  # timing
    l2r_cv_avg_ndcg_scores = np.zeros(len(cutoffs))  # fold average
    l2r_cv_avg_nerr_scores = np.zeros(len(cutoffs))  # fold average
    l2r_cv_avg_ap_scores = np.zeros(len(cutoffs))  # fold average
    l2r_cv_avg_p_scores = np.zeros(len(cutoffs))  # fold average

    list_all_fold_ndcg_at_ks_per_q = []
    list_all_fold_err_at_ks_per_q = []
    list_all_fold_ap_at_ks_per_q = []
    list_all_fold_p_at_ks_per_q = []

    for fold_k in range(1, fold_num + 1):
        # determine the file paths
        file_train, file_vali, file_test = self.determine_files(data_dict=data_dict, fold_k=fold_k)
        self.update_save_model_dir(data_dict=data_dict, fold_k=fold_k)

        y_test, group_test, y_pred = tree_ranker.run(fold_k=fold_k,
                                                     file_train=file_train, file_vali=file_vali, file_test=file_test,
                                                     data_dict=data_dict, eval_dict=eval_dict,
                                                     save_model_dir=self.save_model_dir)

        fold_avg_ndcg_at_ks, fold_avg_nerr_at_ks, fold_avg_ap_at_ks, fold_avg_p_at_ks,\
        list_ndcg_at_ks_per_q, list_err_at_ks_per_q, list_ap_at_ks_per_q, list_p_at_ks_per_q = \
            self.cal_metric_at_ks(model_id=model_id, all_std_labels=y_test, all_preds=y_pred,
                                  group=group_test, ks=cutoffs, label_type=data_dict['label_type'])

        performance_list = [model_id] if data_id in YAHOO_LTR or data_id in ISTELLA_LTR \
                           else [model_id + ' Fold-' + str(fold_k)]

        for i, co in enumerate(cutoffs):
            performance_list.append('\nnDCG@{}:{:.4f}'.format(co, fold_avg_ndcg_at_ks[i]))
        for i, co in enumerate(cutoffs):
            performance_list.append('\nnERR@{}:{:.4f}'.format(co, fold_avg_nerr_at_ks[i]))
        for i, co in enumerate(cutoffs):
            performance_list.append('\nMAP@{}:{:.4f}'.format(co, fold_avg_ap_at_ks[i]))
        for i, co in enumerate(cutoffs):
            performance_list.append('\nP@{}:{:.4f}'.format(co, fold_avg_p_at_ks[i]))

        performance_str = '\t'.join(performance_list)
        print('\n\t', performance_str)

        l2r_cv_avg_ndcg_scores = np.add(l2r_cv_avg_ndcg_scores, fold_avg_ndcg_at_ks)  # sum for later cv-performance
        l2r_cv_avg_nerr_scores = np.add(l2r_cv_avg_nerr_scores, fold_avg_nerr_at_ks)  # sum for later cv-performance
        l2r_cv_avg_ap_scores = np.add(l2r_cv_avg_ap_scores, fold_avg_ap_at_ks)  # sum for later cv-performance
        l2r_cv_avg_p_scores = np.add(l2r_cv_avg_p_scores, fold_avg_p_at_ks)  # sum for later cv-performance

        list_all_fold_ndcg_at_ks_per_q.extend(list_ndcg_at_ks_per_q)
        list_all_fold_err_at_ks_per_q.extend(list_err_at_ks_per_q)
        list_all_fold_ap_at_ks_per_q.extend(list_ap_at_ks_per_q)
        list_all_fold_p_at_ks_per_q.extend(list_p_at_ks_per_q)

    time_end = datetime.datetime.now()  # overall timing
    elapsed_time_str = str(time_end - time_begin)
    print('Elapsed time:\t', elapsed_time_str + "\n")
    print()

    # begin to print either cv or average performance
    l2r_cv_avg_ndcg_scores = np.divide(l2r_cv_avg_ndcg_scores, fold_num)
    l2r_cv_avg_nerr_scores = np.divide(l2r_cv_avg_nerr_scores, fold_num)
    l2r_cv_avg_ap_scores = np.divide(l2r_cv_avg_ap_scores, fold_num)
    l2r_cv_avg_p_scores = np.divide(l2r_cv_avg_p_scores, fold_num)

    if do_validation:
        eval_prefix = str(fold_num) + '-fold cross validation scores:'
    else:
        eval_prefix = str(fold_num) + '-fold average scores:'

    print(model_id, eval_prefix, self.result_to_str(list_scores=l2r_cv_avg_ndcg_scores, list_cutoffs=cutoffs, metric_str='nDCG'))
    print(model_id, eval_prefix, self.result_to_str(list_scores=l2r_cv_avg_nerr_scores, list_cutoffs=cutoffs, metric_str='nERR'))
    print(model_id, eval_prefix, self.result_to_str(list_scores=l2r_cv_avg_ap_scores, list_cutoffs=cutoffs, metric_str='MAP'))
    print(model_id, eval_prefix, self.result_to_str(list_scores=l2r_cv_avg_p_scores, list_cutoffs=cutoffs, metric_str='P'))

    all_fold_ndcg_at_ks_per_q = np.vstack(list_all_fold_ndcg_at_ks_per_q)
    all_fold_err_at_ks_per_q = np.vstack(list_all_fold_err_at_ks_per_q)
    all_fold_ap_at_ks_per_q = np.vstack(list_all_fold_ap_at_ks_per_q)
    all_fold_p_at_ks_per_q = np.vstack(list_all_fold_p_at_ks_per_q)

    pickle_save(all_fold_ndcg_at_ks_per_q, file=self.output_root + '_'.join([data_id, model_id, 'all_fold_ndcg_at_ks_per_q.np']))
    pickle_save(all_fold_err_at_ks_per_q, file=self.output_root + '_'.join([data_id, model_id, 'all_fold_err_at_ks_per_q.np']))
    pickle_save(all_fold_ap_at_ks_per_q, file=self.output_root + '_'.join([data_id, model_id, 'all_fold_ap_at_ks_per_q.np']))
    pickle_save(all_fold_p_at_ks_per_q, file=self.output_root + '_'.join([data_id, model_id, 'all_fold_p_at_ks_per_q.np']))

    return l2r_cv_avg_ndcg_scores, l2r_cv_avg_nerr_scores, l2r_cv_avg_ap_scores, l2r_cv_avg_p_scores
Evaluation based on k-fold cross validation if multiple folds exist :param data_dict: :param eval_dict: :param model_para_dict: :return:
kfold_cv_eval
python
wildltr/ptranking
ptranking/ltr_tree/eval/ltr_tree.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py
MIT
def point_run(self, debug=False, model_id=None, data_id=None, dir_data=None, dir_output=None):
    """
    Perform one-time run based on given setting.
    :param debug:
    :param model_id:
    :param data_id:
    :param dir_data:
    :param dir_output:
    :return:
    """
    self.set_eval_setting(debug=debug, dir_output=dir_output)
    self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
    self.set_model_setting(debug=debug, model_id=model_id)
    self.kfold_cv_eval(data_dict=self.get_default_data_setting(), eval_dict=self.get_default_eval_setting(),
                       model_para_dict=self.get_default_model_setting())
Perform one-time run based on given setting. :param debug: :param model_id: :param data_id: :param dir_data: :param dir_output: :return:
point_run
python
wildltr/ptranking
ptranking/ltr_tree/eval/ltr_tree.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py
MIT
def default_setting(self):
    """
    A default setting for data loading when running lambdaMART
    """
    scaler_id = None
    unknown_as_zero = True if self.data_id in MSLETOR_SEMI else False  # since lambdaMART is a supervised method
    binary_rele = False  # using the original values
    train_presort, validation_presort, test_presort = False, False, False
    train_rough_batch_size, validation_rough_batch_size, test_rough_batch_size = 1, 1, 1
    scale_data, scaler_id, scaler_level = get_scaler_setting(data_id=self.data_id, scaler_id=scaler_id)

    # more data settings that are rarely changed
    self.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1,
                          unknown_as_zero=unknown_as_zero, binary_rele=binary_rele,
                          train_presort=train_presort, validation_presort=validation_presort,
                          test_presort=test_presort,
                          train_rough_batch_size=train_rough_batch_size,
                          validation_rough_batch_size=validation_rough_batch_size,
                          test_rough_batch_size=test_rough_batch_size,
                          scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)

    data_meta = get_data_meta(data_id=self.data_id)  # add meta-information
    self.data_dict.update(data_meta)
    return self.data_dict
A default setting for data loading when running lambdaMART
default_setting
python
wildltr/ptranking
ptranking/ltr_tree/eval/tree_parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/tree_parameter.py
MIT
def to_eval_setting_string(self, log=False):
    """
    String identifier of eval-setting
    :param log:
    :return:
    """
    eval_dict = self.eval_dict
    s1, s2 = (':', '\n') if log else ('_', '_')

    early_stop_or_boost_round, do_validation = eval_dict['early_stop_or_boost_round'], eval_dict['do_validation']
    if do_validation:
        eval_string = s1.join(['EarlyStop', str(early_stop_or_boost_round)])
    else:
        eval_string = s1.join(['BoostRound', str(early_stop_or_boost_round)])

    return eval_string
String identifier of eval-setting :param log: :return:
to_eval_setting_string
python
wildltr/ptranking
ptranking/ltr_tree/eval/tree_parameter.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/tree_parameter.py
MIT
def default_para_dict(self):
    """
    Default parameter setting for LambdaMART
    :return:
    """
    # for custom setting
    #custom_dict = dict(custom=False, custom_obj_id='lambdarank', use_LGBMRanker=True)
    custom_dict = dict(custom=False, custom_obj_id=None)

    # common setting when using in-built lightgbm's ranker
    lightgbm_para_dict = {'boosting_type': 'gbdt',  # ltr_gbdt, dart
                          'objective': 'lambdarank',  # will be updated if performing customization
                          'metric': 'ndcg',
                          'learning_rate': 0.05,
                          'num_leaves': 400,
                          'num_trees': 1000,
                          'num_threads': 16,
                          'min_data_in_leaf': 50,
                          'min_sum_hessian_in_leaf': 200,
                          'eval_at': 5,  # which matters much (early stopping); setting it as 5 is better than the default
                          # 'lambdamart_norm': False,
                          # 'is_training_metric': True,
                          'verbosity': -1}

    self.para_dict = dict(custom_dict=custom_dict, lightgbm_para_dict=lightgbm_para_dict)
    return self.para_dict
Default parameter setting for LambdaMART :return:
default_para_dict
python
wildltr/ptranking
ptranking/ltr_tree/lambdamart/lightgbm_lambdaMART.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/lambdamart/lightgbm_lambdaMART.py
MIT
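The lightgbm_para_dict above is a plain LightGBM parameter dict. A minimal sketch of feeding such a dict to the built-in ranker, on made-up toy data (min_data_in_leaf is lowered here only so the toy set can split at all):

import numpy as np
import lightgbm as lgb

params = {'boosting_type': 'gbdt', 'objective': 'lambdarank', 'metric': 'ndcg',
          'learning_rate': 0.05, 'num_leaves': 400, 'eval_at': 5,
          'min_data_in_leaf': 2, 'verbosity': -1}

X = np.random.rand(10, 4)                           # 10 docs, 4 features
y = np.random.randint(0, 3, size=10).astype(float)  # graded relevance labels
train = lgb.Dataset(X, label=y, group=[5, 5])       # two queries of 5 docs each

booster = lgb.train(params, train, num_boost_round=20)
print(booster.predict(X)[:3])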
def to_para_string(self, log=False, given_para_dict=None):
    """
    String identifier of parameters
    :param log:
    :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
    :return:
    """
    # using the specified para-dict or the inner para-dict
    para_dict = given_para_dict if given_para_dict is not None else self.para_dict
    lightgbm_para_dict = para_dict['lightgbm_para_dict']

    s1, s2 = (':', '\n') if log else ('_', '_')

    BT, metric, num_leaves, num_trees, min_data_in_leaf, min_sum_hessian_in_leaf, lr, eval_at = \
        lightgbm_para_dict['boosting_type'], lightgbm_para_dict['metric'], lightgbm_para_dict['num_leaves'],\
        lightgbm_para_dict['num_trees'], lightgbm_para_dict['min_data_in_leaf'],\
        lightgbm_para_dict['min_sum_hessian_in_leaf'], lightgbm_para_dict['learning_rate'],\
        lightgbm_para_dict['eval_at']

    para_string = s2.join([s1.join(['BT', BT]), s1.join(['Metric', metric]),
                           s1.join(['Leaves', str(num_leaves)]), s1.join(['Trees', str(num_trees)]),
                           s1.join(['MiData', '{:,g}'.format(min_data_in_leaf)]),
                           s1.join(['MSH', '{:,g}'.format(min_sum_hessian_in_leaf)]),
                           s1.join(['LR', '{:,g}'.format(lr)]), s1.join(['EvalAt', str(eval_at)])])
    return para_string
String identifier of parameters :param log: :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search :return:
to_para_string
python
wildltr/ptranking
ptranking/ltr_tree/lambdamart/lightgbm_lambdaMART.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/lambdamart/lightgbm_lambdaMART.py
MIT
def grid_search(self):
    """
    Iterator of parameter settings for LambdaRank
    """
    # for custom setting
    #custom_dict = dict(custom=False, custom_obj_id='lambdarank', use_LGBMRanker=False)
    custom_dict = dict(custom=False, custom_obj_id=None)

    if self.use_json:
        choice_BT = self.json_dict['BT']
        choice_metric = self.json_dict['metric']
        choice_leaves = self.json_dict['leaves']
        choice_trees = self.json_dict['trees']
        choice_MiData = self.json_dict['MiData']
        choice_MSH = self.json_dict['MSH']
        choice_LR = self.json_dict['LR']
        eval_at = self.json_dict['eval_at']
    else:  # common setting when using in-built lightgbm's ranker
        choice_BT = ['gbdt'] if self.debug else ['gbdt']
        choice_metric = ['ndcg'] if self.debug else ['ndcg']
        choice_leaves = [400] if self.debug else [400]
        choice_trees = [1000] if self.debug else [1000]
        choice_MiData = [50] if self.debug else [50]
        choice_MSH = [200] if self.debug else [200]
        choice_LR = [0.05, 0.01] if self.debug else [0.05, 0.01]
        eval_at = 5

    for BT, metric, num_leaves, num_trees, min_data_in_leaf, min_sum_hessian_in_leaf, lr in \
            product(choice_BT, choice_metric, choice_leaves, choice_trees, choice_MiData, choice_MSH, choice_LR):
        lightgbm_para_dict = {'boosting_type': BT,  # ltr_gbdt, dart
                              'objective': 'lambdarank',
                              'metric': metric,
                              'learning_rate': lr,
                              'num_leaves': num_leaves,
                              'num_trees': num_trees,
                              'num_threads': 16,
                              'min_data_in_leaf': min_data_in_leaf,
                              'min_sum_hessian_in_leaf': min_sum_hessian_in_leaf,
                              # 'lambdamart_norm': False,
                              # 'is_training_metric': True,
                              'eval_at': eval_at,  # which matters much (early stopping); setting it as 5 is better than the default
                              #'max_bin': 64,
                              #'max_depth': 4,
                              'verbosity': -1}
        self.para_dict = dict(custom_dict=custom_dict, lightgbm_para_dict=lightgbm_para_dict)
        yield self.para_dict
Iterator of parameter settings for LambdaRank
grid_search
python
wildltr/ptranking
ptranking/ltr_tree/lambdamart/lightgbm_lambdaMART.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/lambdamart/lightgbm_lambdaMART.py
MIT
def triu_indice(k=1, pair_type='NoTies', labels=None):
    '''
    Get unique document pairs being consistent with the specified pair_type.
    This function is used to avoid duplicate computation.
    All: pairs including both pairs of documents across different relevance levels
         and pairs of documents having the same relevance level.
    NoTies: the pairs consisting of two documents of the same relevance level are removed
    No00: the pairs consisting of two non-relevant documents are removed
    :param labels: 1-dimensional vector of relevance labels
    :param k: the offset w.r.t. the diagonal line: k=0 means including the diagonal line,
              k=1 means the upper triangular part without the diagonal line
    :return:
    '''
    #assert pair_type in PAIR_TYPE
    m = len(labels)  # the number of documents

    if pair_type == 'All':
        row_inds, col_inds = np.triu_indices(m, k=k)
    elif pair_type == 'No00':
        row_inds, col_inds = np.triu_indices(m, k=k)
        # remove pairs consisting of two non-relevant documents (00 comparisons)
        pairs = [e for e in zip(row_inds, col_inds) if not (0 == labels[e[0]] and 0 == labels[e[1]])]
        row_inds = [e[0] for e in pairs]
        col_inds = [e[1] for e in pairs]
    elif pair_type == '00':
        row_inds, col_inds = np.triu_indices(m, k=k)
        # keep only pairs consisting of two non-relevant documents (00 comparisons)
        pairs = [e for e in zip(row_inds, col_inds) if (0 == labels[e[0]] and 0 == labels[e[1]])]
        row_inds = [e[0] for e in pairs]
        col_inds = [e[1] for e in pairs]
    elif pair_type == 'NoTies':
        row_inds, col_inds = np.triu_indices(m, k=k)
        # remove pairs of documents of the same relevance level
        pairs = [e for e in zip(row_inds, col_inds) if labels[e[0]] != labels[e[1]]]
        row_inds = [e[0] for e in pairs]
        col_inds = [e[1] for e in pairs]
    else:
        raise NotImplementedError

    return row_inds, col_inds
Get unique document pairs being consistent with the specified pair_type. This function is used to avoid duplicate computation. All: pairs including both pairs of documents across different relevance levels and pairs of documents having the same relevance level. NoTies: the pairs consisting of two documents of the same relevance level are removed No00: the pairs consisting of two non-relevant documents are removed :param labels: 1-dimensional vector of relevance labels :param k: the offset w.r.t. the diagonal line: k=0 means including the diagonal line, k=1 means the upper triangular part without the diagonal line :return:
triu_indice
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
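A small runnable check of triu_indice on a four-document query; with pair_type='NoTies' the tied pair (1, 3) of two label-0 documents is dropped:

import numpy as np
from ptranking.ltr_tree.util.lightgbm_util import triu_indice

labels = np.array([2, 0, 1, 0])  # graded relevance of 4 documents
rows, cols = triu_indice(k=1, pair_type='NoTies', labels=labels)
print(list(zip(rows, cols)))     # [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3)]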
def get_delta_ndcg(ideally_sorted_labels, labels_sorted_via_preds):
    '''
    Delta-nDCG w.r.t. pairwise swapping of the currently predicted ranking
    '''
    idcg = ideal_dcg(ideally_sorted_labels)  # ideal discounted cumulative gain
    gains = np.power(2.0, labels_sorted_via_preds) - 1.0
    n_gains = gains / idcg  # normalised gains
    ng_diffs = np.expand_dims(n_gains, axis=1) - np.expand_dims(n_gains, axis=0)

    ranks = np.arange(len(labels_sorted_via_preds)) + 1.0
    dists = 1.0 / np.log2(ranks + 1.0)  # discount coefficients
    dists_diffs = np.expand_dims(dists, axis=1) - np.expand_dims(dists, axis=0)

    mat_delta_ndcg = np.abs(ng_diffs) * np.abs(dists_diffs)  # absolute changes w.r.t. pairwise swapping
    return mat_delta_ndcg
Delta-nDCG w.r.t. pairwise swapping of the currently predicted ranking
get_delta_ndcg
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
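To make the delta-nDCG weights concrete, a self-contained numeric sketch that mirrors the formula above; the ideal_dcg helper is reimplemented here under the assumption that it uses the standard 2^label - 1 gains with log2 discounts:

import numpy as np

def ideal_dcg(sorted_labels):  # assumed form of the helper used above
    gains = np.power(2.0, sorted_labels) - 1.0
    discounts = np.log2(np.arange(len(sorted_labels)) + 2.0)
    return np.sum(gains / discounts)

labels_sorted_via_preds = np.array([0.0, 2.0, 1.0])  # labels in predicted order
idcg = ideal_dcg(np.flip(np.sort(labels_sorted_via_preds)))

n_gains = (np.power(2.0, labels_sorted_via_preds) - 1.0) / idcg
dists = 1.0 / np.log2(np.arange(3) + 2.0)
mat = np.abs(n_gains[:, None] - n_gains[None, :]) * np.abs(dists[:, None] - dists[None, :])
print(mat)  # mat[i, j] = |change in nDCG| if the documents at ranks i and j are swapped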
def per_query_gradient_hessian_lambda(preds=None, labels=None, first_order=False, weighting=False,
                                      weighting_type='DeltaNDCG', pair_type='NoTies', epsilon=1.0):
    '''
    Compute the corresponding gradient & hessian
    cf. LightGBM https://github.com/microsoft/LightGBM/blob/master/src/objective/rank_objective.hpp
    cf. XGBoost https://github.com/dmlc/xgboost/blob/master/src/objective/rank_obj.cc
    :param preds: 1-dimensional predicted scores
    :param labels: 1-dimensional ground truth
    :return:
    '''
    desc_inds = np.flip(np.argsort(preds))  # indices that sort the preds in descending order
    system_sorted_preds = preds[desc_inds]
    labels_sorted_via_preds = labels[desc_inds]
    row_inds, col_inds = triu_indice(labels=labels_sorted_via_preds, k=1, pair_type=pair_type)

    # prediction difference
    mat_s_ij = np.expand_dims(system_sorted_preds, axis=1) - np.expand_dims(system_sorted_preds, axis=0)
    # S_ij in {-1, 0, 1} is the standard indicator
    mat_S_ij = np.expand_dims(labels_sorted_via_preds, axis=1) - np.expand_dims(labels_sorted_via_preds, axis=0)
    mat_S_ij = np.clip(mat_S_ij, a_min=-1.0, a_max=1.0)

    num_docs, num_pairs = len(labels), len(row_inds)
    if first_order:
        grad = np.zeros((num_docs,))
    else:
        grad, hess = np.zeros((num_docs,)), np.zeros((num_docs,))

    # note: weighting is a bool flag, so the membership test must be on weighting_type
    if weighting and weighting_type in WEIGHTING_TYPE:
        if weighting_type == 'DeltaNDCG':
            ideally_sorted_labels = np.flip(np.sort(labels))
            mat_weights = get_delta_ndcg(ideally_sorted_labels=ideally_sorted_labels,
                                         labels_sorted_via_preds=labels_sorted_via_preds)
        elif weighting_type == 'DeltaGain':
            mat_weights = get_delta_gains(labels_sorted_via_preds=labels_sorted_via_preds)

    for i in range(num_pairs):  # iterate over pairs
        r, c = row_inds[i], col_inds[i]
        s_ij = mat_s_ij[r, c]
        S_ij = mat_S_ij[r, c]
        lambda_ij = epsilon * (sigmoid(s_ij, epsilon=epsilon) - 0.5 * (1.0 + S_ij))  # gradient w.r.t. s_i
        if weighting and weighting_type in WEIGHTING_TYPE:
            lambda_ij *= mat_weights[r, c]  # delta metric variance
        lambda_ji = -lambda_ij  # gradient w.r.t. s_j

        # desc_inds[r] denotes the original index of the document currently at the r-th position
        # after a full descending ordering by predictions
        grad[desc_inds[r]] += lambda_ij
        grad[desc_inds[c]] += lambda_ji

        if not first_order:  # 2nd-order hessian
            lambda_ij_2order = np.power(epsilon, 2.0) * sigmoid(s_ij) * (1.0 - sigmoid(s_ij))
            # trick as XGBoost https://github.com/dmlc/xgboost/blob/master/src/objective/rank_obj.cc
            lambda_ij_2order = np.maximum(lambda_ij_2order, 1e-16)
            if weighting and weighting_type in WEIGHTING_TYPE:
                lambda_ij_2order *= mat_weights[r, c]
            lambda_ji_2order = -lambda_ij_2order
            hess[desc_inds[r]] += lambda_ij_2order
            hess[desc_inds[c]] += lambda_ji_2order

    if first_order:
        return grad, None
    else:
        return grad, hess
Compute the corresponding gradient & hessian cf. LightGBM https://github.com/microsoft/LightGBM/blob/master/src/objective/rank_objective.hpp cf. XGBoost https://github.com/dmlc/xgboost/blob/master/src/objective/rank_obj.cc :param preds: 1-dimension predicted scores :param labels: 1-dimension ground truth :return:
per_query_gradient_hessian_lambda
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
def lightgbm_custom_obj_ranknet(labels=None, preds=None, group=None):
    """
    :param labels: numpy.ndarray of shape (size_data, )
    :param preds: numpy.ndarray of shape (size_data, )
    :param group: numpy.ndarray of shape (num_queries, )
    :return:
    """
    size_data = len(labels)
    if FIRST_ORDER:
        all_grad, all_hess = np.zeros((size_data,)), np.full((size_data,), fill_value=CONSTANT_HESSIAN)
    else:
        all_grad, all_hess = np.zeros((size_data,)), np.zeros((size_data,))

    head = 0
    for num_docs_per_query in group.astype(int):
        labels_per_query = labels[head:head + num_docs_per_query]
        preds_per_query = preds[head:head + num_docs_per_query]

        grad_per_query, hess_per_query = per_query_gradient_hessian_lambda(
            preds=preds_per_query, labels=labels_per_query, first_order=FIRST_ORDER,
            pair_type='All', epsilon=1.0, weighting=False)

        all_grad[head:head + num_docs_per_query] = grad_per_query
        if not FIRST_ORDER:
            all_hess[head:head + num_docs_per_query] = hess_per_query
        head += num_docs_per_query

    return all_grad, all_hess
:param labels: numpy.ndarray of shape (size_data, ) :param preds: numpy.ndarray of shape (size_data, ) :param group: numpy.ndarray of shape (num_queries, ) :return:
lightgbm_custom_obj_ranknet
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
def lightgbm_custom_obj_ranknet_fobj(preds, train_data):
    '''
    The traditional RankNet
    :param preds: numpy.ndarray of shape (size_data, )
    :param train_data:
    :return:
    '''
    all_labels = train_data.get_label()  # numpy.ndarray of shape (size_data, )
    group = train_data.get_group()  # numpy.ndarray of shape (num_queries, )
    size_data = len(all_labels)

    if FIRST_ORDER:
        all_grad, all_hess = np.zeros((size_data,)), np.full((size_data,), fill_value=CONSTANT_HESSIAN)
    else:
        all_grad, all_hess = np.zeros((size_data,)), np.zeros((size_data,))

    head = 0
    for num_docs_per_query in group.astype(int):
        labels_per_query = all_labels[head:head + num_docs_per_query]
        preds_per_query = preds[head:head + num_docs_per_query]

        grad_per_query, hess_per_query = per_query_gradient_hessian_lambda(
            preds=preds_per_query, labels=labels_per_query, first_order=FIRST_ORDER,
            pair_type='All', epsilon=1.0, weighting=False)

        all_grad[head:head + num_docs_per_query] = grad_per_query
        if not FIRST_ORDER:
            all_hess[head:head + num_docs_per_query] = hess_per_query
        head += num_docs_per_query

    return all_grad, all_hess
The traditional RankNet :param preds: numpy.ndarray of shape (size_data, ) :param train_data: :return:
lightgbm_custom_obj_ranknet_fobj
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
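A hedged sketch of plugging the callable above into LightGBM on toy data; where the callable goes depends on the LightGBM version (params['objective'] since 4.0, the fobj argument of lgb.train before that), and the module-level FIRST_ORDER/CONSTANT_HESSIAN flags are assumed to be configured as in the source:

import numpy as np
import lightgbm as lgb
from ptranking.ltr_tree.util.lightgbm_util import lightgbm_custom_obj_ranknet_fobj

X = np.random.rand(8, 4)                             # 8 docs, 4 features (toy)
y = np.array([2, 1, 0, 0, 1, 0, 2, 0], dtype=float)  # graded relevance
train = lgb.Dataset(X, label=y, group=[4, 4])        # two queries of 4 docs each

params = {'boosting_type': 'gbdt', 'learning_rate': 0.05,
          'min_data_in_leaf': 2, 'verbosity': -1,
          'objective': lightgbm_custom_obj_ranknet_fobj}  # LightGBM >= 4.0
booster = lgb.train(params, train, num_boost_round=10)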
def lightgbm_custom_obj_lambdarank(labels=None, preds=None, group=None):
    """
    :param labels: numpy.ndarray of shape (size_data, )
    :param preds: numpy.ndarray of shape (size_data, )
    :param group: numpy.ndarray of shape (num_queries, )
    :return:
    """
    size_data = len(labels)
    if FIRST_ORDER:
        all_grad, all_hess = np.zeros((size_data,)), np.full((size_data,), fill_value=CONSTANT_HESSIAN)
    else:
        all_grad, all_hess = np.zeros((size_data,)), np.zeros((size_data,))

    head = 0
    for num_docs_per_query in group.astype(int):
        labels_per_query = labels[head:head + num_docs_per_query]
        preds_per_query = preds[head:head + num_docs_per_query]

        grad_per_query, hess_per_query = per_query_gradient_hessian_lambda(
            preds=preds_per_query, labels=labels_per_query, first_order=FIRST_ORDER,
            pair_type='NoTies', epsilon=1.0, weighting=True, weighting_type='DeltaNDCG')

        all_grad[head:head + num_docs_per_query] = grad_per_query
        if not FIRST_ORDER:
            all_hess[head:head + num_docs_per_query] = hess_per_query
        head += num_docs_per_query

    return all_grad, all_hess
:param labels: numpy.ndarray of shape (size_data, ) :param preds: :param group: numpy.ndarray of shape (num_queries, ) :return:
lightgbm_custom_obj_lambdarank
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
def lightgbm_custom_obj_lambdarank_fobj(preds, train_data):
    '''
    :param preds: numpy.ndarray of shape (size_data, )
    :param train_data:
    :return:
    '''
    all_labels = train_data.get_label()  # numpy.ndarray of shape (size_data, )
    group = train_data.get_group()  # numpy.ndarray of shape (num_queries, )
    size_data = len(all_labels)

    if FIRST_ORDER:
        all_grad, all_hess = np.zeros((size_data,)), np.full((size_data,), fill_value=CONSTANT_HESSIAN)
    else:
        all_grad, all_hess = np.zeros((size_data,)), np.zeros((size_data,))

    head = 0
    for num_docs_per_query in group.astype(int):
        labels_per_query = all_labels[head:head + num_docs_per_query]
        preds_per_query = preds[head:head + num_docs_per_query]

        grad_per_query, hess_per_query = per_query_gradient_hessian_lambda(
            preds=preds_per_query, labels=labels_per_query, first_order=FIRST_ORDER,
            pair_type='NoTies', epsilon=1.0, weighting=True, weighting_type='DeltaNDCG')

        all_grad[head:head + num_docs_per_query] = grad_per_query
        if not FIRST_ORDER:
            all_hess[head:head + num_docs_per_query] = hess_per_query
        head += num_docs_per_query

    return all_grad, all_hess
:param preds: numpy.ndarray of shape (size_data, ) :param train_data: :return:
lightgbm_custom_obj_lambdarank_fobj
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
def per_query_gradient_hessian_listnet(preds=None, labels=None, gain_type='Power', first_order=False):
    '''
    Compute the corresponding gradient & hessian
    cf. LightGBM https://github.com/microsoft/LightGBM/blob/master/src/objective/rank_objective.hpp
    cf. XGBoost https://github.com/dmlc/xgboost/blob/master/src/objective/rank_obj.cc
    :param preds: 1-dimensional predicted scores
    :param labels: 1-dimensional ground truth
    :return:
    '''
    assert gain_type in GAIN_TYPE

    if 'Power' == gain_type:
        gains = np.power(2.0, labels) - 1.0
    elif 'Label' == gain_type:
        gains = labels

    p_pred, p_truth = _softmax(preds), _softmax(gains)
    grad = p_pred - p_truth
    hess = None if first_order else p_pred * (1.0 - p_pred)
    return grad, hess
Compute the corresponding gradient & hessian cf. LightGBM https://github.com/microsoft/LightGBM/blob/master/src/objective/rank_objective.hpp cf. XGBoost https://github.com/dmlc/xgboost/blob/master/src/objective/rank_obj.cc :param preds: 1-dimension predicted scores :param labels: 1-dimension ground truth :return:
per_query_gradient_hessian_listnet
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
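A small numeric check of the ListNet gradient above, approximating the _softmax helper with scipy.special.softmax (an assumption about that helper): the gradient is the difference between the predicted and target top-one distributions, so it sums to roughly zero per query.

import numpy as np
from scipy.special import softmax

preds = np.array([0.5, 1.5, 0.2])
labels = np.array([1.0, 2.0, 0.0])

p_pred = softmax(preds)
p_truth = softmax(np.power(2.0, labels) - 1.0)  # 'Power' gain
grad = p_pred - p_truth
hess = p_pred * (1.0 - p_pred)
print(grad, hess, grad.sum())  # grad.sum() is ~0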
def lightgbm_custom_obj_listnet(labels=None, preds=None, group=None):
    """
    :param labels: numpy.ndarray of shape (size_data, )
    :param preds: numpy.ndarray of shape (size_data, )
    :param group: numpy.ndarray of shape (num_queries, )
    :return:
    """
    size_data = len(labels)
    if FIRST_ORDER:
        all_grad, all_hess = np.zeros((size_data,)), np.full((size_data,), fill_value=CONSTANT_HESSIAN)
    else:
        all_grad, all_hess = np.zeros((size_data,)), np.zeros((size_data,))

    head = 0
    for num_docs_per_query in group.astype(int):
        labels_per_query = labels[head:head + num_docs_per_query]
        preds_per_query = preds[head:head + num_docs_per_query]

        grad_per_query, hess_per_query = per_query_gradient_hessian_listnet(
            preds=preds_per_query, labels=labels_per_query, gain_type='Power', first_order=FIRST_ORDER)

        all_grad[head:head + num_docs_per_query] = grad_per_query
        if not FIRST_ORDER:
            all_hess[head:head + num_docs_per_query] = hess_per_query
        head += num_docs_per_query

    return all_grad, all_hess
:param labels: numpy.ndarray of shape (size_data, ) :param preds: numpy.ndarray of shape (size_data, ) :param group: numpy.ndarray of shape (num_queries, ) :return:
lightgbm_custom_obj_listnet
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
def lightgbm_custom_obj_listnet_fobj(preds, train_data):
    '''
    :param preds: numpy.ndarray of shape (size_data, )
    :param train_data:
    :return:
    '''
    all_labels = train_data.get_label()  # numpy.ndarray of shape (size_data, )
    group = train_data.get_group()  # numpy.ndarray of shape (num_queries, )
    size_data = len(all_labels)

    if FIRST_ORDER:
        all_grad, all_hess = np.zeros((size_data,)), np.full((size_data,), fill_value=CONSTANT_HESSIAN)
    else:
        all_grad, all_hess = np.zeros((size_data,)), np.zeros((size_data,))

    head = 0
    for num_docs_per_query in group.astype(int):
        labels_per_query = all_labels[head:head + num_docs_per_query]
        preds_per_query = preds[head:head + num_docs_per_query]

        grad_per_query, hess_per_query = per_query_gradient_hessian_listnet(
            preds=preds_per_query, labels=labels_per_query, gain_type='Power', first_order=FIRST_ORDER)  # Power, Label

        all_grad[head:head + num_docs_per_query] = grad_per_query
        if not FIRST_ORDER:
            all_hess[head:head + num_docs_per_query] = hess_per_query
        head += num_docs_per_query

    return all_grad, all_hess
:param preds: numpy.ndarray of shape (size_data, ) :param train_data: :return:
lightgbm_custom_obj_listnet_fobj
python
wildltr/ptranking
ptranking/ltr_tree/util/lightgbm_util.py
https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/util/lightgbm_util.py
MIT
def get_delta_ndcg(batch_ideal_rankings, batch_predict_rankings, label_type=LABEL_TYPE.MultiLabel, device='cpu'):
    '''
    Delta-nDCG w.r.t. pairwise swapping of the currently predicted ranking
    :param batch_ideal_rankings: the standard labels sorted in a descending order
    :param batch_predict_rankings: the standard labels sorted based on the corresponding predictions
    :return:
    '''
    # ideal discounted cumulative gain
    batch_idcgs = torch_dcg_at_k(batch_rankings=batch_ideal_rankings, label_type=label_type, device=device)

    if LABEL_TYPE.MultiLabel == label_type:
        batch_gains = torch.pow(2.0, batch_predict_rankings) - 1.0
    elif LABEL_TYPE.Permutation == label_type:
        batch_gains = batch_predict_rankings
    else:
        raise NotImplementedError

    batch_n_gains = batch_gains / batch_idcgs  # normalised gains
    batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)

    batch_std_ranks = torch.arange(batch_predict_rankings.size(1), dtype=torch.float, device=device)
    batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)  # discount coefficients
    batch_dists = torch.unsqueeze(batch_dists, dim=0)
    batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)

    batch_delta_ndcg = torch.abs(batch_ng_diffs) * torch.abs(batch_dists_diffs)  # absolute changes w.r.t. pairwise swapping
    return batch_delta_ndcg
Delta-nDCG w.r.t. pairwise swapping of the currently predicted ranking :param batch_ideal_rankings: the standard labels sorted in a descending order :param batch_predict_rankings: the standard labels sorted based on the corresponding predictions :return:
get_delta_ndcg
python
wildltr/ptranking
ptranking/metric/metric_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/metric_utils.py
MIT
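A quick shape check of the batch version above, with toy tensors (the import path follows the repo metadata):

import torch
from ptranking.metric.metric_utils import get_delta_ndcg

ideal = torch.tensor([[2., 1., 0.]])      # labels in ideal order
predicted = torch.tensor([[0., 2., 1.]])  # labels in the predicted order
delta = get_delta_ndcg(batch_ideal_rankings=ideal, batch_predict_rankings=predicted)
print(delta.shape)  # torch.Size([1, 3, 3]); delta[0, i, j] = |nDCG change| when swapping ranks i and j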
def metric_results_to_string(list_scores=None, list_cutoffs=None, split_str=', ', metric='nDCG'):
    """
    Convert metric results to a string representation
    :param list_scores:
    :param list_cutoffs:
    :param split_str:
    :return:
    """
    list_str = []
    for i in range(len(list_scores)):
        list_str.append(metric + '@{}:{:.4f}'.format(list_cutoffs[i], list_scores[i]))
    return split_str.join(list_str)
Convert metric results to a string representation :param list_scores: :param list_cutoffs: :param split_str: :return:
metric_results_to_string
python
wildltr/ptranking
ptranking/metric/metric_utils.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/metric_utils.py
MIT
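For instance (the import path follows the repo metadata):

from ptranking.metric.metric_utils import metric_results_to_string
print(metric_results_to_string([0.42, 0.4567], [1, 5]))  # nDCG@1:0.4200, nDCG@5:0.4567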
def torch_precision_at_k(batch_predict_rankings, k=None, device='cpu'):
    '''
    Precision at k
    :param batch_predict_rankings: [batch_size, ranking_size] each ranking consists of labels
                                   corresponding to the ranked predictions
    :param k: cutoff value
    '''
    max_cutoff = batch_predict_rankings.size(1)
    used_cutoff = min(max_cutoff, k)

    batch_sys_sorted_labels = batch_predict_rankings[:, 0:used_cutoff]
    batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1)  # binary
    batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)

    batch_ranks = (torch.arange(used_cutoff, dtype=torch.float, device=device).expand_as(batch_sys_cumsum_reles) + 1.0)

    batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks
    batch_sys_p_at_k = batch_sys_rankwise_precision[:, used_cutoff - 1:used_cutoff]
    return batch_sys_p_at_k
Precision at k :param batch_predict_rankings: [batch_size, ranking_size] each ranking consists of labels corresponding to the ranked predictions :param k: cutoff value
torch_precision_at_k
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
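A quick runnable check of the precision computation above, with two toy queries of five documents each:

import torch
from ptranking.metric.adhoc.adhoc_metric import torch_precision_at_k

batch = torch.tensor([[2., 0., 1., 0., 0.],
                      [0., 0., 1., 1., 0.]])  # labels in predicted order
print(torch_precision_at_k(batch, k=3))       # tensor([[0.6667], [0.3333]])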
def torch_precision_at_ks(batch_predict_rankings, ks=None, device='cpu'):
    '''
    Precision at ks
    :param batch_predict_rankings: [batch_size, ranking_size] each ranking consists of labels
                                   corresponding to the ranked predictions
    :param ks: cutoff values
    :return: [batch_size, len(ks)]
    '''
    valid_max_cutoff = batch_predict_rankings.size(1)
    need_padding = True if valid_max_cutoff < max(ks) else False
    used_ks = [k for k in ks if k <= valid_max_cutoff] if need_padding else ks

    max_cutoff = max(used_ks)
    inds = torch.from_numpy(np.asarray(used_ks) - 1)

    batch_sys_sorted_labels = batch_predict_rankings[:, 0:max_cutoff]
    batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1)  # binary
    batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)

    batch_ranks = (torch.arange(max_cutoff, dtype=torch.float, device=device).expand_as(batch_sys_cumsum_reles) + 1.0)

    batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks
    batch_sys_p_at_ks = batch_sys_rankwise_precision[:, inds]

    if need_padding:
        padded_p_at_ks = torch.zeros(batch_sys_sorted_labels.size(0), len(ks))
        padded_p_at_ks[:, 0:len(used_ks)] = batch_sys_p_at_ks
        return padded_p_at_ks
    else:
        return batch_sys_p_at_ks
Precision at ks :param batch_predict_rankings: [batch_size, ranking_size] each ranking consists of labels corresponding to the ranked predictions :param ks: cutoff values :return: [batch_size, len(ks)]
torch_precision_at_ks
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
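The padding branch matters when a query has fewer documents than the largest cutoff; the positions for unreachable cutoffs are filled with zeros:

import torch
from ptranking.metric.adhoc.adhoc_metric import torch_precision_at_ks

batch = torch.tensor([[1., 0., 1.]])               # only 3 documents
print(torch_precision_at_ks(batch, ks=[1, 3, 5]))  # tensor([[1.0000, 0.6667, 0.0000]])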
def torch_ap_at_k(batch_predict_rankings, batch_ideal_rankings, k=None, device='cpu'):
    '''
    AP (average precision) at a single cutoff value k
    :param batch_ideal_rankings: [batch_size, ranking_size] the ideal ranking of labels
    :param batch_predict_rankings: [batch_size, ranking_size] the system's predicted ranking of labels in descending order
    :param k: cutoff value
    :return: [batch_size, 1]
    '''
    max_cutoff = batch_predict_rankings.size(1)
    used_cutoff = min(max_cutoff, k)

    batch_sys_sorted_labels = batch_predict_rankings[:, 0:used_cutoff]
    batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1)  # binary
    batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)

    batch_ranks = (torch.arange(used_cutoff, dtype=torch.float, device=device).expand_as(batch_sys_cumsum_reles) + 1.0)

    batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks  # rank-wise precision
    # exclude precisions whose corresponding documents are not relevant
    batch_sys_cumsum_precision = torch.cumsum(batch_sys_rankwise_precision * batch_bi_sys_sorted_labels, dim=1)

    batch_std_cumsum_reles = torch.cumsum(batch_ideal_rankings, dim=1)
    batch_sys_rankwise_ap = batch_sys_cumsum_precision / batch_std_cumsum_reles[:, 0:used_cutoff]
    batch_sys_ap_at_k = batch_sys_rankwise_ap[:, used_cutoff - 1:used_cutoff]
    return batch_sys_ap_at_k
AP (average precision) at a single cutoff value k :param batch_ideal_rankings: [batch_size, ranking_size] the ideal ranking of labels :param batch_predict_rankings: [batch_size, ranking_size] the system's predicted ranking of labels in descending order :param k: cutoff value :return: [batch_size, 1]
torch_ap_at_k
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
def torch_ap_at_ks(batch_predict_rankings, batch_ideal_rankings, ks=None, device='cpu'):
    '''
    AP (average precision) at ks (i.e., different cutoff values)
    :param batch_ideal_rankings: [batch_size, ranking_size] the ideal ranking of labels
    :param batch_predict_rankings: [batch_size, ranking_size] the system's predicted ranking of labels in descending order
    :param ks:
    :return: [batch_size, len(ks)]
    '''
    valid_max_cutoff = batch_predict_rankings.size(1)
    need_padding = True if valid_max_cutoff < max(ks) else False
    used_ks = [k for k in ks if k <= valid_max_cutoff] if need_padding else ks

    max_cutoff = max(used_ks)
    inds = torch.from_numpy(np.asarray(used_ks) - 1)

    batch_sys_sorted_labels = batch_predict_rankings[:, 0:max_cutoff]
    batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1)  # binary
    batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)

    batch_ranks = (torch.arange(max_cutoff, dtype=torch.float, device=device).expand_as(batch_sys_cumsum_reles) + 1.0)

    batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks  # rank-wise precision
    # exclude precisions whose corresponding documents are not relevant
    batch_sys_cumsum_precision = torch.cumsum(batch_sys_rankwise_precision * batch_bi_sys_sorted_labels, dim=1)

    batch_std_cumsum_reles = torch.cumsum(batch_ideal_rankings, dim=1)
    batch_sys_rankwise_ap = batch_sys_cumsum_precision / batch_std_cumsum_reles[:, 0:max_cutoff]
    batch_sys_ap_at_ks = batch_sys_rankwise_ap[:, inds]

    if need_padding:
        padded_ap_at_ks = torch.zeros(batch_sys_sorted_labels.size(0), len(ks))
        padded_ap_at_ks[:, 0:len(used_ks)] = batch_sys_ap_at_ks
        return padded_ap_at_ks
    else:
        return batch_sys_ap_at_ks
AP (average precision) at ks (i.e., different cutoff values) :param batch_ideal_rankings: [batch_size, ranking_size] the ideal ranking of labels :param batch_predict_rankings: [batch_size, ranking_size] the system's predicted ranking of labels in descending order :param ks: :return: [batch_size, len(ks)]
torch_ap_at_ks
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
def torch_nerr_at_ks(batch_predict_rankings, batch_ideal_rankings, ks=None, device='cpu',
                     label_type=LABEL_TYPE.MultiLabel, max_label=None):
    '''
    :param batch_predict_rankings: [batch_size, ranking_size] the standard labels sorted in descending order
                                   according to predicted relevance scores
    :param ks:
    :return: [batch_size, len(ks)]
    '''
    valid_max_cutoff = batch_predict_rankings.size(1)
    need_padding = True if valid_max_cutoff < max(ks) else False
    used_ks = [k for k in ks if k <= valid_max_cutoff] if need_padding else ks

    if max_label is None:
        max_label = torch.max(batch_ideal_rankings)

    max_cutoff = max(used_ks)
    inds = torch.from_numpy(np.asarray(used_ks) - 1)

    if LABEL_TYPE.MultiLabel == label_type:
        batch_sys_rankwise_err = torch_rankwise_err(batch_predict_rankings, max_label=max_label,
                                                    k=max_cutoff, point=False, device=device)
        batch_ideal_rankwise_err = torch_rankwise_err(batch_ideal_rankings, max_label=max_label,
                                                      k=max_cutoff, point=False, device=device)
        batch_rankwise_nerr = batch_sys_rankwise_err / batch_ideal_rankwise_err
        batch_nerr_at_ks = batch_rankwise_nerr[:, inds]
        if need_padding:
            padded_nerr_at_ks = torch.zeros(batch_predict_rankings.size(0), len(ks))
            padded_nerr_at_ks[:, 0:len(used_ks)] = batch_nerr_at_ks
            return padded_nerr_at_ks
        else:
            return batch_nerr_at_ks
    else:
        raise NotImplementedError
:param batch_predict_rankings: [batch_size, ranking_size] the standard labels sorted in descending order according to predicted relevance scores :param ks: :return: [batch_size, len(ks)]
torch_nerr_at_ks
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
def torch_dcg_at_k(batch_rankings, cutoff=None, label_type=LABEL_TYPE.MultiLabel, device='cpu'):
    '''
    ICML-nDCG, which places stronger emphasis on retrieving relevant documents
    :param batch_rankings: [batch_size, ranking_size] rankings of labels (either standard or predicted by a system)
    :param cutoff: the cutoff position
    :param label_type: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
    :return: [batch_size, 1] the DCG value at the cutoff position
    '''
    if cutoff is None:  # using the whole list
        cutoff = batch_rankings.size(1)

    if LABEL_TYPE.MultiLabel == label_type:  # the common case with multi-level labels
        batch_numerators = torch.pow(2.0, batch_rankings[:, 0:cutoff]) - 1.0
    elif LABEL_TYPE.Permutation == label_type:  # the case like listwise ranking, where the relevance is labeled as (n - rank_position)
        batch_numerators = batch_rankings[:, 0:cutoff]
    else:
        raise NotImplementedError

    # no expanding should also be OK due to the default broadcasting
    batch_discounts = torch.log2(torch.arange(cutoff, dtype=torch.float, device=device).expand_as(batch_numerators) + 2.0)
    batch_dcg_at_k = torch.sum(batch_numerators / batch_discounts, dim=1, keepdim=True)
    return batch_dcg_at_k
ICML-nDCG, which places stronger emphasis on retrieving relevant documents :param batch_rankings: [batch_size, ranking_size] rankings of labels (either standard or predicted by a system) :param cutoff: the cutoff position :param label_type: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list :return: [batch_size, 1] the DCG value at the cutoff position
torch_dcg_at_k
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
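A worked example of the exponential-gain DCG above, for one ranking of four labels:

import torch
from ptranking.metric.adhoc.adhoc_metric import torch_dcg_at_k

ranking = torch.tensor([[3., 2., 0., 1.]])  # labels in ranked order
# DCG = (2^3 - 1)/log2(2) + (2^2 - 1)/log2(3) + 0 + (2^1 - 1)/log2(5) ~= 9.3235
print(torch_dcg_at_k(ranking))  # tensor([[9.3235]])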
def torch_dcg_at_ks(batch_rankings, max_cutoff, label_type=LABEL_TYPE.MultiLabel, device='cpu'):
    '''
    :param batch_rankings: [batch_size, ranking_size] rankings of labels (either standard or predicted by a system)
    :param max_cutoff: the maximum cutoff value
    :param label_type: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
    :return: [batch_size, max_cutoff] cumulative gains for each rank position
    '''
    if LABEL_TYPE.MultiLabel == label_type:  # the common case with multi-level labels
        batch_numerators = torch.pow(2.0, batch_rankings[:, 0:max_cutoff]) - 1.0
    elif LABEL_TYPE.Permutation == label_type:  # the case like listwise ranking, where the relevance is labeled as (n - rank_position)
        batch_numerators = batch_rankings[:, 0:max_cutoff]
    else:
        raise NotImplementedError

    batch_discounts = torch.log2(torch.arange(max_cutoff, dtype=torch.float, device=device).expand_as(batch_numerators) + 2.0)
    batch_dcg_at_ks = torch.cumsum(batch_numerators / batch_discounts, dim=1)  # dcg w.r.t. each position
    return batch_dcg_at_ks
:param batch_rankings: [batch_size, ranking_size] rankings of labels (either standard or predicted by a system) :param max_cutoff: the maximum cutoff value :param label_type: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list :return: [batch_size, max_cutoff] cumulative gains for each rank position
torch_dcg_at_ks
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
def np_metric_at_ks(ranker=None, test_Qs=None, ks=[1, 5, 10], label_type=LABEL_TYPE.MultiLabel, max_rele_level=None, gpu=False, device=None):
    '''
    No check is performed here; it is assumed (say, light_filtering() has been called) that each test instance Q
    includes at least k (k = max(ks)) documents and at least one relevant document, otherwise there will be errors.
    '''
    cnt = 0
    sum_ndcg_at_ks = torch.zeros(len(ks))
    sum_err_at_ks = torch.zeros(len(ks))
    sum_ap_at_ks = torch.zeros(len(ks))
    sum_p_at_ks = torch.zeros(len(ks))
    list_ndcg_at_ks_per_q = []
    list_err_at_ks_per_q = []
    list_ap_at_ks_per_q = []
    list_p_at_ks_per_q = []

    for entry in test_Qs:
        tor_test_ranking, tor_test_std_label_vec = entry[1], torch.squeeze(entry[2], dim=0)  # remove the size-1 dim=0 from the loader itself

        if gpu:
            tor_rele_pred = ranker.predict(tor_test_ranking.to(device))
            tor_rele_pred = torch.squeeze(tor_rele_pred)
            tor_rele_pred = tor_rele_pred.cpu()
        else:
            tor_rele_pred = ranker.predict(tor_test_ranking)
            tor_rele_pred = torch.squeeze(tor_rele_pred)

        _, tor_sorted_inds = torch.sort(tor_rele_pred, descending=True)

        sys_sorted_labels = tor_test_std_label_vec[tor_sorted_inds]
        ideal_sorted_labels, _ = torch.sort(tor_test_std_label_vec, descending=True)

        # note: the keyword names below differ from the *_at_ks signatures shown elsewhere in this file
        ndcg_at_ks_per_query = torch_ndcg_at_ks(sys_sorted_labels=sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, ks=ks, label_type=label_type)
        sum_ndcg_at_ks = torch.add(sum_ndcg_at_ks, ndcg_at_ks_per_query)
        list_ndcg_at_ks_per_q.append(ndcg_at_ks_per_query.numpy())

        err_at_ks_per_query = torch_nerr_at_ks(sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, ks=ks, label_type=label_type)
        sum_err_at_ks = torch.add(sum_err_at_ks, err_at_ks_per_query)
        list_err_at_ks_per_q.append(err_at_ks_per_query.numpy())

        ap_at_ks_per_query = torch_ap_at_ks(sys_sorted_labels=sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, ks=ks)
        sum_ap_at_ks = torch.add(sum_ap_at_ks, ap_at_ks_per_query)
        list_ap_at_ks_per_q.append(ap_at_ks_per_query.numpy())

        p_at_ks_per_query = torch_precision_at_ks(sys_sorted_labels=sys_sorted_labels, ks=ks)
        sum_p_at_ks = torch.add(sum_p_at_ks, p_at_ks_per_query)
        list_p_at_ks_per_q.append(p_at_ks_per_query.numpy())

        cnt += 1

    ndcg_at_ks = sum_ndcg_at_ks / cnt
    err_at_ks = sum_err_at_ks / cnt
    ap_at_ks = sum_ap_at_ks / cnt
    p_at_ks = sum_p_at_ks / cnt

    return ndcg_at_ks.numpy(), err_at_ks.numpy(), ap_at_ks.numpy(), p_at_ks.numpy(), \
           list_ndcg_at_ks_per_q, list_err_at_ks_per_q, list_ap_at_ks_per_q, list_p_at_ks_per_q
No check is performed here; it is assumed (say, light_filtering() has been called) that each test instance Q includes at least k (k = max(ks)) documents and at least one relevant document, otherwise there will be errors.
np_metric_at_ks
python
wildltr/ptranking
ptranking/metric/adhoc/adhoc_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/adhoc/adhoc_metric.py
MIT
def precision_as_opt_objective(top_k=None, batch_smooth_ranks=None, batch_std_labels=None, presort=False, opt_ideal=False, device=None):
    '''
    Precision expectation maximization.
    @param top_k: only use the top-k results if not None
    @param batch_smooth_ranks: the differentiable (expected) ranks
    @param batch_std_labels:
    @param presort: whether the standard labels are already sorted in descending order or not
    @param opt_ideal: optimise the ideal ranking or sort results each time
    @return:
    '''
    '''
    According to the derivation of differentiable ranks, they are inversely correlated w.r.t. the scoring function.
    Thus they should be used as the denominator. For being used as the numerator, i.e.,
    {batch_precision = torch.sum(batch_ascend_expt_ranks/batch_natural_ranks*top_k_labels, dim=1)/k},
    we need to revise the related formulations.
    '''
    ranking_size = batch_std_labels.size(1)
    batch_bi_std_labels = torch.clamp(batch_std_labels, min=0, max=1)  # use binary labels
    # 1-dimensional vector treated as a batch via broadcasting
    batch_natural_ranks = torch.arange(ranking_size, dtype=torch.float, device=device).view(1, -1) + 1.0
    #batch_expt_ranks = get_expected_rank(batch_mus=batch_mus, batch_vars=batch_vars, batch_cocos=batch_cocos)

    if opt_ideal:  # TODO adding dynamic shuffle is better, otherwise it will always be one order
        assert presort is True
        if top_k is None:  # using the whole list
            batch_precision = torch.sum(batch_natural_ranks / batch_smooth_ranks * batch_bi_std_labels, dim=1) / ranking_size
        else:
            batch_precision = torch.sum(batch_natural_ranks[:, 0:top_k] / batch_smooth_ranks[:, 0:top_k]
                                        * batch_bi_std_labels[:, 0:top_k], dim=1) / top_k

        precision_loss = -torch.sum(batch_precision)
        zero_metric_value = False  # there should be no zero value due to pre-filtering, i.e., min_rele=1
        return precision_loss, zero_metric_value
    else:  # recommended over opt_ideal, due to the nature of dynamic ranking
        '''
        Sort the predicted ranks in an ascending natural order (i.e., 1, 2, 3, ..., n);
        the returned indices can be used to sort other vectors following the predicted order.
        '''
        batch_ascend_expt_ranks, sort_indices = torch.sort(batch_smooth_ranks, dim=1, descending=False)
        # sort labels according to the expected ranks
        batch_sys_std_labels = torch.gather(batch_bi_std_labels, dim=1, index=sort_indices)

        if top_k is None:  # using the whole list
            batch_precision = torch.sum(batch_natural_ranks / batch_ascend_expt_ranks * batch_sys_std_labels, dim=1) / ranking_size
            precision_loss = -torch.sum(batch_precision)
            return precision_loss, False
        else:
            top_k_labels = batch_sys_std_labels[:, 0:top_k]
            non_zero_inds = torch.nonzero(torch.sum(top_k_labels, dim=1))
            zero_metric_value = False if non_zero_inds.size(0) > 0 else True
            if zero_metric_value:
                return None, zero_metric_value
            else:
                pos_inds = non_zero_inds[:, 0]
                batch_precision = torch.sum(batch_natural_ranks[:, 0:top_k] / batch_ascend_expt_ranks[pos_inds, 0:top_k]
                                            * top_k_labels[pos_inds, :], dim=1) / top_k
                precision_loss = -torch.sum(batch_precision)
                return precision_loss, zero_metric_value
Precision expectation maximization. @param top_k: only use the top-k results if not None @param batch_smooth_ranks: the differentiable (expected) ranks @param batch_std_labels: @param presort: whether the standard labels are already sorted in descending order or not @param opt_ideal: optimise the ideal ranking or sort results each time @return:
precision_as_opt_objective
python
wildltr/ptranking
ptranking/metric/smooth_metric/metric_as_opt_objective.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/smooth_metric/metric_as_opt_objective.py
MIT
def get_delta_alpha_dcg(ideal_q_doc_rele_mat=None, sys_q_doc_rele_mat=None, alpha=0.5, device='cpu', normalization=True):
    '''
    Get the delta alpha-nDCG w.r.t. pairwise swapping of the currently predicted order.
    @param ideal_q_doc_rele_mat: the standard labels sorted in an ideal order
    @param sys_q_doc_rele_mat: the standard labels sorted based on the corresponding predictions
    @param alpha:
    @param device:
    @return:
    '''
    num_subtopics, ranking_size = sys_q_doc_rele_mat.size()
    if normalization:
        ideal_alpha_DCG = torch_alpha_dcg_at_k(sorted_q_doc_rele_mat=ideal_q_doc_rele_mat,
                                               k=ranking_size, alpha=alpha, device=device)

    prior_rele_mat = torch.zeros_like(sys_q_doc_rele_mat)
    prior_rele_mat[:, 1:ranking_size] = sys_q_doc_rele_mat[:, 0:ranking_size - 1]  # the cover count for the 1st doc should be zero
    prior_cover_cnts = torch.cumsum(prior_rele_mat, dim=1)
    subtopic_user_focus = torch.pow((1.0 - alpha), prior_cover_cnts)

    subtopic_gains = torch.pow(2.0, sys_q_doc_rele_mat) - 1.0
    subtopic_gain_diffs = torch.unsqueeze(subtopic_gains, dim=2) - torch.unsqueeze(subtopic_gains, dim=1)

    ranks = torch.arange(ranking_size, dtype=torch.float, device=device)
    rank_discounts = 1.0 / torch.log2(ranks + 2.0)  # discount coefficients

    subtopic_user_focus_1st = torch.unsqueeze(subtopic_user_focus, dim=2).expand(-1, -1, ranking_size)
    rank_discounts_1st = rank_discounts.view(1, -1, 1)
    subtopic_coffs_1st = rank_discounts_1st * subtopic_user_focus_1st

    subtopic_user_focus_2nd = torch.unsqueeze(subtopic_user_focus, dim=1).expand(-1, ranking_size, -1)
    rank_discounts_2nd = rank_discounts.view(1, 1, -1)
    subtopic_coffs_2nd = rank_discounts_2nd * subtopic_user_focus_2nd

    # absolute changes w.r.t. pairwise swapping
    delta_alpha_DCG = torch.abs(torch.sum(subtopic_gain_diffs * subtopic_coffs_1st, dim=0)
                                - torch.sum(subtopic_gain_diffs * subtopic_coffs_2nd, dim=0))
    if normalization:
        return delta_alpha_DCG / ideal_alpha_DCG
    else:
        return delta_alpha_DCG
Get the delta alpha-nDCG w.r.t. pairwise swapping of the currently predicted order. @param ideal_q_doc_rele_mat: the standard labels sorted in an ideal order @param sys_q_doc_rele_mat: the standard labels sorted based on the corresponding predictions @param alpha: @param device: @return:
get_delta_alpha_dcg
python
wildltr/ptranking
ptranking/metric/srd/diversity_metric.py
https://github.com/wildltr/ptranking/blob/master/ptranking/metric/srd/diversity_metric.py
MIT
def np_shuffle_ties(vec, descending=True):
    '''
    namely, randomly permute ties
    :param vec:
    :param descending: the sorting order w.r.t. the input vec
    :return:
    '''
    if len(vec.shape) > 1:
        raise NotImplementedError
    else:
        length = vec.shape[0]
        perm = np.random.permutation(length)
        shuffled_vec = sorted(vec[perm], reverse=descending)
        return shuffled_vec
namely, randomly permute ties :param vec: :param descending: the sorting order w.r.t. the input vec :return:
np_shuffle_ties
python
wildltr/ptranking
ptranking/utils/numpy/np_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/numpy/np_extensions.py
MIT
def np_arg_shuffle_ties(vec, descending=True):
    ''' the same as np_shuffle_ties, but returns the corresponding indices '''
    if len(vec.shape) > 1:
        raise NotImplementedError
    else:
        length = vec.shape[0]
        perm = np.random.permutation(length)
        if descending:
            sorted_shuffled_vec_inds = np.argsort(-vec[perm])
        else:
            sorted_shuffled_vec_inds = np.argsort(vec[perm])
        shuffle_ties_inds = perm[sorted_shuffled_vec_inds]
        return shuffle_ties_inds
the same as np_shuffle_ties, but returns the corresponding indices
np_arg_shuffle_ties
python
wildltr/ptranking
ptranking/utils/numpy/np_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/numpy/np_extensions.py
MIT
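A quick demonstration of both tie-shuffling helpers (my sketch; only numpy is assumed): the random permutation applied before sorting randomises the relative order of equal values while distinct values stay correctly ordered.

import numpy as np

np.random.seed(7)
vec = np.array([3., 1., 3., 2., 3.])
print(np_shuffle_ties(vec))          # e.g. [3.0, 3.0, 3.0, 2.0, 1.0] (a Python list, descending)
inds = np_arg_shuffle_ties(vec)      # indices of the three tied 3.0s appear in random order across runs
print(inds, vec[inds])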
def np_plackett_luce_sampling(items, probs, softmaxed=False):
    ''' sample a ranking based on the Plackett-Luce model
    :param items: the candidate entries, e.g., document indices
    :param probs: a vector of values; the higher the value, the more likely the corresponding entry is sampled early
    :param softmaxed: whether probs is already a normalised probability distribution
    :return: the sampled ranking of items
    '''
    if not softmaxed:
        probs = np_softmax(probs)
    # drawing a full permutation without replacement, proportional to the remaining
    # probabilities at each step, is exactly a Plackett-Luce sample
    ranking = np.random.choice(items, size=len(probs), p=probs, replace=False)
    return ranking
sample a ranking based on the Plackett-Luce model :param items: the candidate entries, e.g., document indices :param probs: a vector of values; the higher the value, the more likely the corresponding entry is sampled early :param softmaxed: whether probs is already a normalised probability distribution :return: the sampled ranking of items
np_plackett_luce_sampling
python
wildltr/ptranking
ptranking/utils/numpy/np_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/numpy/np_extensions.py
MIT
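A usage sketch (np_softmax is assumed importable from the same np_extensions module): raw utilities are softmaxed and a full permutation is then drawn without replacement.

import numpy as np

np.random.seed(0)
items = np.arange(4)                          # candidate document indices
scores = np.array([2.0, 0.5, 1.0, -1.0])      # raw utilities; np_softmax is applied internally
ranking = np_plackett_luce_sampling(items, scores, softmaxed=False)
print(ranking)                                # a permutation of 0..3; high-score items tend to come first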
def shuffle_ties(vec, descending=True):
    ''' namely, randomly permute ties
    :param vec:
    :param descending: the sorting order w.r.t. the input vec
    :return:
    '''
    if len(vec.size()) > 1:
        raise NotImplementedError
    else:
        length = vec.size()[0]
        perm = torch.randperm(length)
        shuffled_vec, _ = torch.sort(vec[perm], descending=descending)
        return shuffled_vec
namely, randomly permute ties :param vec: :param descending: the sorting order w.r.t. the input vec :return:
shuffle_ties
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
def arg_shuffle_ties(vec, descending=True):
    ''' the same as shuffle_ties, but returns the corresponding indices '''
    if len(vec.size()) > 1:
        raise NotImplementedError
    else:
        length = vec.size()[0]
        perm = torch.randperm(length)
        sorted_shuffled_vec_inds = torch.argsort(vec[perm], descending=descending)
        shuffle_ties_inds = perm[sorted_shuffled_vec_inds]
        return shuffle_ties_inds
the same as shuffle_ties, but returns the corresponding indices
arg_shuffle_ties
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
def plackett_luce_sampling(probs, softmaxed=False):
    ''' sample a ranking based on the Plackett-Luce model
    :param probs: a vector of values; the higher the value, the more likely the corresponding entry is sampled early
    :param softmaxed: whether probs is already a normalised probability distribution
    :return: the indices defining the sampled ranking
    '''
    if not softmaxed:
        probs = F.softmax(probs, dim=0)
    # multinomial sampling without replacement is equivalent to a Plackett-Luce draw
    inds = torch.multinomial(probs, probs.size()[0], replacement=False)
    return inds
sample a ranking based on the Plackett-Luce model :param probs: a vector of values; the higher the value, the more likely the corresponding entry is sampled early :param softmaxed: whether probs is already a normalised probability distribution :return: the indices defining the sampled ranking
plackett_luce_sampling
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
def soft_rank_sampling(loc, covariance_matrix=None, inds_style=True, descending=True):
    '''
    :param loc: mean of the distribution
    :param covariance_matrix: positive-definite covariance matrix
    :param inds_style: True means returning the indices that define the sampled ranking
    :return:
    '''
    m = MultivariateNormal(loc, covariance_matrix)
    vals = m.sample()
    if inds_style:
        sorted_inds = torch.argsort(vals, descending=descending)
        return sorted_inds
    else:
        return vals
:param loc: mean of the distribution :param covariance_matrix: positive-definite covariance matrix :param inds_style: True means returning the indices that define the sampled ranking :return:
soft_rank_sampling
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
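A sketch of drawing one noisy ranking (values and covariance are made up): with a small isotropic covariance the sampled order usually follows the means.

import torch

torch.manual_seed(0)
loc = torch.tensor([2.0, 0.0, 1.0])           # per-document mean scores
cov = 0.1 * torch.eye(3)                      # positive-definite covariance
inds = soft_rank_sampling(loc, covariance_matrix=cov)
print(inds)                                   # typically tensor([0, 2, 1]) at this noise level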
def forward(ctx, MU, sigma, gpu):
    '''
    :param ctx:
    :param MU: a tensor of mean values
    :param sigma: a float value, the shared standard deviation
    :param gpu: whether the tensors live on GPU
    :return:
    '''
    tmp_MU = MU.detach()
    tmp_MU = tmp_MU.view(1, -1)
    np_MU = tmp_MU.cpu().numpy() if gpu else tmp_MU.numpy()
    # for each mu, integrate the pdf of N(mu, sigma^2) over (0, inf), i.e., P(X > 0);
    # note the parenthesisation ((y - mu) / sigma) ** 2, which the Gaussian pdf requires
    np_integrated_probs = [quad(lambda y: ONEOVERSQRT2PI * np.exp(-0.5 * ((y - mu) / sigma) ** 2) / sigma,
                                0, np.inf)[0] for mu in np.ravel(np_MU)]
    integrated_probs = torch.as_tensor(np_integrated_probs, dtype=MU.dtype)
    integrated_probs = integrated_probs.view(MU.size())
    ctx.save_for_backward(MU, torch.tensor([sigma]))
    return integrated_probs
:param ctx: :param MU: a tensor of mean values :param sigma: a float value, the shared standard deviation :param gpu: whether the tensors live on GPU :return:
forward
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
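A standalone sanity check of the quadrature (my sketch, not repository code): for X ~ N(mu, sigma^2), the mass above zero is Phi(mu / sigma), which the corrected integrand ((y - mu) / sigma) ** 2 reproduces.

import numpy as np
from scipy.integrate import quad
from scipy.stats import norm

ONEOVERSQRT2PI = 1.0 / np.sqrt(2.0 * np.pi)   # the constant the function above relies on
mu, sigma = 0.3, 1.5
integral, _ = quad(lambda y: ONEOVERSQRT2PI * np.exp(-0.5 * ((y - mu) / sigma) ** 2) / sigma, 0, np.inf)
print(integral, norm.cdf(mu / sigma))         # both ~0.5793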
def sinkhorn_2D(x, num_iter=5): ''' Sinkhorn (1964) showed that if X is a positive square matrix, there exist positive diagonal matrices D1 and D2 such that D1XD2 is doubly stochastic. The method of proof is based on an iterative procedure of alternatively normalizing the rows and columns of X. :param x: the given positive square matrix ''' for i in range(num_iter): x = torch.div(x, torch.sum(x, dim=1, keepdim=True)) x = torch.div(x, torch.sum(x, dim=0, keepdim=True)) return x
Sinkhorn (1964) showed that if X is a positive square matrix, there exist positive diagonal matrices D1 and D2 such that D1XD2 is doubly stochastic. The method of proof is based on an iterative procedure of alternatively normalizing the rows and columns of X. :param x: the given positive square matrix
sinkhorn_2D
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
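A quick check (sketch): a strictly positive random matrix becomes close to doubly stochastic after enough alternating normalizations.

import torch

torch.manual_seed(0)
x = torch.rand(4, 4) + 0.1                    # strictly positive square matrix
ds = sinkhorn_2D(x, num_iter=50)
print(ds.sum(dim=1))                          # row sums ~1
print(ds.sum(dim=0))                          # column sums exactly 1 (the last step normalises columns)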
def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable logsumexp.
    Args:
        inputs: A tensor with any shape.
        dim: An integer.
        keepdim: A boolean.
    Returns:
        Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).
    """
    # For a 1-D array x (any array along a single dimension),
    # log sum exp(x) = s + log sum exp(x - s)
    # with s = max(x) being a common choice.
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    s, _ = torch.max(inputs, dim=dim, keepdim=True)
    outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
    if not keepdim:
        outputs = outputs.squeeze(dim)
    return outputs
Numerically stable logsumexp. Args: inputs: A tensor with any shape. dim: An integer. keepdim: A boolean. Returns: Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).
logsumexp
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
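A small equivalence check (sketch): the helper should agree with torch.logsumexp, including the dim=None flattening path.

import torch

x = torch.randn(3, 5)
print(torch.allclose(logsumexp(x, dim=1), torch.logsumexp(x, dim=1)))       # True
print(torch.allclose(logsumexp(x), torch.logsumexp(x.view(-1), dim=0)))     # True for the flattened case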
def sinkhorn_batch_(batch_x, num_iter=20, eps=1e-10, tau=0.05):
    '''
    Temperature (tau) -controlled Sinkhorn layer.
    By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix with positive entries can be turned
    into a doubly-stochastic matrix (i.e. its rows and columns add up to one) via successive row and column
    normalization.
    -To ensure positivity, the effective input to sinkhorn has to be exp(log_alpha) (elementwise).
    -However, for stability, sinkhorn works in the log-space. It is only at return time that entries are exponentiated.
    [1] Sinkhorn, Richard and Knopp, Paul. Concerning nonnegative matrices and doubly stochastic matrices.
        Pacific Journal of Mathematics, 1967
    :param batch_x: a batch of square matrices; the restriction of 'positive' w.r.t. batch_x is not needed,
                    since exp() is deployed here.
    :param num_iter: number of sinkhorn iterations (in practice, as few as 20 iterations are needed to achieve
                     decent convergence for N~100)
    :return: A 3D tensor of close-to-doubly-stochastic matrices (2D tensors are converted to 3D tensors with
             batch_size equal to 1)
    '''
    if tau is not None:
        batch_x = batch_x/tau # as tau approaches zero (from above), the result is more like a permutation matrix

    for _ in range(num_iter):
        batch_x = batch_x - logsumexp(batch_x, dim=2, keepdim=True) # row normalization
        batch_x = batch_x - logsumexp(batch_x, dim=1, keepdim=True) # column normalization
        if (batch_x != batch_x).sum() > 0 or batch_x.max() > 1e9: # x != x is a test for NaN
            break

    return torch.exp(batch_x) + eps # add a small offset 'eps' in order to avoid numerical errors due to exp()
Temperature (tau) -controlled Sinkhorn layer. By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix with positive entries can be turned into a doubly-stochastic matrix (i.e. its rows and columns add up to one) via successive row and column normalization. -To ensure positivity, the effective input to sinkhorn has to be exp(log_alpha) (elementwise). -However, for stability, sinkhorn works in the log-space. It is only at return time that entries are exponentiated. [1] Sinkhorn, Richard and Knopp, Paul. Concerning nonnegative matrices and doubly stochastic matrices. Pacific Journal of Mathematics, 1967 :param batch_x: a batch of square matrices; the restriction of 'positive' w.r.t. batch_x is not needed, since exp() is deployed here. :param num_iter: number of sinkhorn iterations (in practice, as few as 20 iterations are needed to achieve decent convergence for N~100) :return: A 3D tensor of close-to-doubly-stochastic matrices (2D tensors are converted to 3D tensors with batch_size equal to 1)
sinkhorn_batch_
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
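A temperature demo (sketch; the logsumexp helper above is assumed in scope): a small tau pushes the output toward a hard permutation while row and column sums stay near one.

import torch

torch.manual_seed(0)
batch_x = torch.randn(2, 5, 5)                # raw log-space scores; positivity is not required
ds = sinkhorn_batch_(batch_x, num_iter=50, tau=0.05)
print(ds[0].sum(dim=0))                       # column sums ~1
print(ds[0].sum(dim=1))                       # row sums ~1; entries are close to 0/1 at this temperature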
def pl_normalize(batch_scores=None):
    '''
    Normalization based on the 'Plackett_Luce' model
    :param batch_scores: [batch, ranking_size]
    :return: the i-th entry represents the probability of being ranked at the i-th position
    '''
    m, _ = torch.max(batch_scores, dim=1, keepdim=True) # for higher stability
    y = batch_scores - m
    y = torch.exp(y)
    # row-wise cumulative sum, from tail to head (note: torch.flip expects a list of dims)
    y_cumsum_t2h = torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])
    batch_pros = torch.div(y, y_cumsum_t2h)
    return batch_pros
Normalization based on the 'Plackett_Luce' model :param batch_scores: [batch, ranking_size] :return: the i-th entry represents the probability of being ranked at the i-th position
pl_normalize
python
wildltr/ptranking
ptranking/utils/pytorch/pt_extensions.py
https://github.com/wildltr/ptranking/blob/master/ptranking/utils/pytorch/pt_extensions.py
MIT
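A worked check (sketch): the first position probability must coincide with the ordinary softmax, because the tail-to-head cumulative sum over the whole row is the full partition function.

import torch
import torch.nn.functional as F

batch_scores = torch.tensor([[2.0, 1.0, 0.0]])
probs = pl_normalize(batch_scores)
print(probs)                                              # PL position probabilities; the last entry is 1
print(F.softmax(batch_scores, dim=1)[0, 0], probs[0, 0])  # first entries agree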
def get_doc_num(dataset): ''' compute the number of documents in a dataset ''' doc_num = 0 for qid, torch_batch_rankings, torch_batch_std_labels in dataset: doc_num += torch_batch_std_labels.size(1) return doc_num
compute the number of documents in a dataset
get_doc_num
python
wildltr/ptranking
testing/data/testing_data_utils.py
https://github.com/wildltr/ptranking/blob/master/testing/data/testing_data_utils.py
MIT
def get_min_max_docs(train_dataset, vali_dataset, test_dataset, semi_supervised=False):
    ''' get the minimum / maximum number of documents per query, together with the total number of relevant
    documents (and the total number of unlabeled documents in the semi-supervised case) '''
    min_doc = 10000000
    max_doc = 0
    sum_rele = 0
    if semi_supervised:
        sum_unknown = 0

    for qid, torch_batch_rankings, torch_batch_std_labels in train_dataset:
        doc_num = torch_batch_std_labels.size(1)
        min_doc = min(doc_num, min_doc)
        max_doc = max(max_doc, doc_num)
        sum_rele += (torch_batch_std_labels>0).sum()
        if semi_supervised:
            sum_unknown += (torch_batch_std_labels<0).sum()

    if vali_dataset is not None:
        for qid, torch_batch_rankings, torch_batch_std_labels in vali_dataset:
            doc_num = torch_batch_std_labels.size(1)
            min_doc = min(doc_num, min_doc)
            max_doc = max(max_doc, doc_num)
            sum_rele += (torch_batch_std_labels>0).sum()
            if semi_supervised:
                sum_unknown += (torch_batch_std_labels < 0).sum()

    for qid, torch_batch_rankings, torch_batch_std_labels in test_dataset:
        doc_num = torch_batch_std_labels.size(1)
        min_doc = min(doc_num, min_doc)
        max_doc = max(max_doc, doc_num)
        sum_rele += (torch_batch_std_labels>0).sum()
        if semi_supervised:
            sum_unknown += (torch_batch_std_labels<0).sum()

    if semi_supervised:
        return min_doc, max_doc, sum_rele.data.numpy(), sum_unknown.data.numpy()
    else:
        return min_doc, max_doc, sum_rele.data.numpy()
get the minimum / maximum number of documents per query, together with the total number of relevant documents (and the total number of unlabeled documents in the semi-supervised case)
get_min_max_docs
python
wildltr/ptranking
testing/data/testing_data_utils.py
https://github.com/wildltr/ptranking/blob/master/testing/data/testing_data_utils.py
MIT
def get_min_max_feature(train_dataset, vali_dataset, test_dataset):
    ''' get the minimum / maximum feature values in a dataset '''
    for name, dataset in [('train', train_dataset), ('vali', vali_dataset), ('test', test_dataset)]:
        # start from +/- infinity so the reported extrema are the true ones
        min_f, max_f = float('inf'), float('-inf')
        for qid, torch_batch_rankings, torch_batch_std_labels in dataset:
            mav = torch.max(torch_batch_rankings)
            if torch.isinf(mav):
                print(qid, mav)  # flag queries containing infinite feature values
            elif mav > max_f:
                max_f = mav
            miv = torch.min(torch_batch_rankings)
            if miv < min_f:
                min_f = miv
        print(name, min_f, '\t', max_f)
get the minimum / maximum feature values in a dataset
get_min_max_feature
python
wildltr/ptranking
testing/data/testing_data_utils.py
https://github.com/wildltr/ptranking/blob/master/testing/data/testing_data_utils.py
MIT
def check_dataset_statistics(data_id, dir_data, buffer=False):
    ''' Get the basic statistics on the specified dataset '''
    if data_id in YAHOO_LTR:
        data_prefix = dir_data + data_id.lower() + '.'
        file_train, file_vali, file_test = data_prefix + 'train.txt', data_prefix + 'valid.txt', data_prefix + 'test.txt'
    elif data_id in ISTELLA_LTR:
        data_prefix = dir_data + data_id + '/'
        if data_id == 'Istella_X' or data_id == 'Istella_S':
            file_train, file_vali, file_test = data_prefix + 'train.txt', data_prefix + 'vali.txt', data_prefix + 'test.txt'
        else:
            file_train, file_test = data_prefix + 'train.txt', data_prefix + 'test.txt'
    else:
        fold_k = 1
        fold_k_dir = dir_data + 'Fold' + str(fold_k) + '/'
        file_train, file_vali, file_test = fold_k_dir + 'train.txt', fold_k_dir + 'vali.txt', fold_k_dir + 'test.txt'

    # common
    if 'Istella' == data_id: # no validation data for this dataset
        train_dataset = LTRDataset(split_type=SPLIT_TYPE.Train, file=file_train, data_id=data_id, shuffle=False, buffer=buffer)
        test_dataset = LTRDataset(split_type=SPLIT_TYPE.Test, file=file_test, data_id=data_id, shuffle=False, buffer=buffer)

        num_queries = train_dataset.__len__() + test_dataset.__len__()
        print('Dataset:\t', data_id)
        print('Total queries:\t', num_queries)
        print('\tTrain:', train_dataset.__len__(), 'Test:', test_dataset.__len__())

        num_docs = get_doc_num(train_dataset) + get_doc_num(test_dataset)
        print('Total docs:\t', num_docs)

        min_doc, max_doc, sum_rele = get_min_max_docs(train_dataset=train_dataset, vali_dataset=None, test_dataset=test_dataset)
        data_meta = get_data_meta(data_id=data_id)
        max_rele_label = data_meta['max_rele_level']
        sum_bin_cnts = get_label_distribution(train_dataset=train_dataset, test_dataset=test_dataset,
                                              semi_supervised=False, max_lavel=max_rele_label)
    else:
        train_dataset = LTRDataset(split_type=SPLIT_TYPE.Train, file=file_train, data_id=data_id, buffer=buffer)
        vali_dataset = LTRDataset(split_type=SPLIT_TYPE.Validation, file=file_vali, data_id=data_id, buffer=buffer)
        test_dataset = LTRDataset(split_type=SPLIT_TYPE.Test, file=file_test, data_id=data_id, buffer=buffer)

        num_queries = train_dataset.__len__() + vali_dataset.__len__() + test_dataset.__len__()
        print('Dataset:\t', data_id)
        print('Total queries:\t', num_queries)
        print('\tTrain:', train_dataset.__len__(), 'Vali:', vali_dataset.__len__(), 'Test:', test_dataset.__len__())

        num_docs = get_doc_num(train_dataset) + get_doc_num(vali_dataset) + get_doc_num(test_dataset)
        print('Total docs:\t', num_docs)

        if data_id in MSLETOR_SEMI:
            min_doc, max_doc, sum_rele, sum_unknown = \
                get_min_max_docs(train_dataset=train_dataset, vali_dataset=vali_dataset, test_dataset=test_dataset,
                                 semi_supervised=True)
        else:
            min_doc, max_doc, sum_rele = get_min_max_docs(train_dataset=train_dataset, vali_dataset=vali_dataset,
                                                          test_dataset=test_dataset)
        data_meta = get_data_meta(data_id=data_id)
        max_rele_label = data_meta['max_rele_level']
        sum_bin_cnts = get_label_distribution(train_dataset=train_dataset, vali_dataset=vali_dataset,
                                              test_dataset=test_dataset, semi_supervised=False,
                                              max_lavel=max_rele_label)

    print('min, max documents per query', min_doc, max_doc)
    print('total relevant documents', sum_rele)
    print('avg rele documents per query', sum_rele * 1.0 / num_queries)
    print('avg documents per query', num_docs * 1.0 / num_queries)
    print('label distribution: ', sum_bin_cnts)

    if data_id in MSLETOR_SEMI:
        print('total unlabeled documents', sum_unknown)
Get the basic statistics on the specified dataset
check_dataset_statistics
python
wildltr/ptranking
testing/data/testing_data_utils.py
https://github.com/wildltr/ptranking/blob/master/testing/data/testing_data_utils.py
MIT
def test_ap():
    ''' todo-as-note: the denominator should be carefully checked when using AP@k '''
    # here we assume that there are five relevant documents, but the system retrieves only three of them
    sys_sorted_labels = torch.Tensor([1.0, 0.0, 1.0, 0.0, 1.0])
    std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 1.0, 1.0])
    ap_at_ks = torch_ap_at_ks(sys_sorted_labels.view(1, -1), std_sorted_labels.view(1, -1), ks=[1, 3, 5])
    print(ap_at_ks.size(), ap_at_ks) # tensor([1.0000, 0.5556, 0.4533])
    ap_at_k = torch_ap_at_k(sys_sorted_labels.view(1, -1), std_sorted_labels.view(1, -1), k=3)
    print(ap_at_k.size(), ap_at_k) # AP@3 only, i.e., ~0.5556

    sys_sorted_labels = torch.Tensor([1.0, 0.0, 1.0, 0.0, 1.0])
    std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 0.0, 0.0])
    ap_at_ks = torch_ap_at_ks(sys_sorted_labels.view(1, -1), std_sorted_labels.view(1, -1), ks=[1, 3, 5])
    print(ap_at_ks) # tensor([1.0000, 0.5556, 0.7556])

    # here we assume that there are four relevant documents, and the system retrieves all four of them
    sys_sorted_labels = torch.Tensor([1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0])
    std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
    ap_at_ks = torch_ap_at_ks(sys_sorted_labels.view(1, -1), std_sorted_labels.view(1, -1), ks=[1, 2, 3, 5, 7])
    print(ap_at_ks) # tensor([1.0000, 1.0000, 0.6667, 0.6875, 0.8304])
    ap_at_k = torch_ap_at_k(sys_sorted_labels.view(1, -1), std_sorted_labels.view(1, -1), k=5)
    print(ap_at_k) # AP@5 only, i.e., ~0.6875
    print()
todo-as-note: the denominator should be carefully checked when using AP@k
test_ap
python
wildltr/ptranking
testing/metric/testing_metric.py
https://github.com/wildltr/ptranking/blob/master/testing/metric/testing_metric.py
MIT
def run(
    self,
    times=1,
    models=None,
    dataset="Alpha360",
    universe="",
    exclude=False,
    qlib_uri: str = "git+https://github.com/microsoft/qlib#egg=pyqlib",
    exp_folder_name: str = "run_all_model_records",
    wait_before_rm_env: bool = False,
    wait_when_err: bool = False,
):
    """
    Please be aware that this function can only work under Linux. MacOS and Windows will be supported in the future.
    Any PR to enhance this method is highly welcomed. Besides, this script doesn't support running the same model
    multiple times in parallel, and this will be fixed in the future development.

    Parameters:
    -----------
    times : int
        determines how many times the model should be run.
    models : str or list
        determines the specific model or list of models to run or exclude.
    exclude : boolean
        determines whether the model being used is excluded or included.
    dataset : str
        determines the dataset to be used for each model.
    universe : str
        the stock universe of the dataset.
        default "" indicates that no universe suffix is appended when choosing the config.
    qlib_uri : str
        the uri to install qlib with pip
        it could be URI on the remote or local path (NOTE: the local path must be an absolute path)
    exp_folder_name: str
        the name of the experiment folder
    wait_before_rm_env : bool
        wait before removing the environment.
    wait_when_err : bool
        wait when errors are raised while executing commands

    Usage:
    -------
    Here are some use cases of the function in bash:

    The run_all_models will decide which config to run based on `models` `dataset` `universe`
    Example 1):

        models="lightgbm", dataset="Alpha158", universe="" will result in running the following config
        examples/benchmarks/LightGBM/workflow_config_lightgbm_Alpha158.yaml

        models="lightgbm", dataset="Alpha158", universe="csi500" will result in running the following config
        examples/benchmarks/LightGBM/workflow_config_lightgbm_Alpha158_csi500.yaml

    .. code-block:: bash

        # Case 1 - run all models multiple times
        python run_all_model.py run 3

        # Case 2 - run specific models multiple times
        python run_all_model.py run 3 mlp

        # Case 3 - run specific models multiple times with specific dataset
        python run_all_model.py run 3 mlp Alpha158

        # Case 4 - run all models except those given as arguments, multiple times
        python run_all_model.py run 3 [mlp,tft,lstm] --exclude=True

        # Case 5 - run specific models one time
        python run_all_model.py run --models=[mlp,lightgbm]

        # Case 6 - run all models except those given as arguments, one time
        python run_all_model.py run --models=[mlp,tft,sfm] --exclude=True

        # Case 7 - run lightgbm model on csi500.
python run_all_model.py run 3 lightgbm Alpha158 csi500 """ self._init_qlib(exp_folder_name) # get all folders folders = get_all_folders(models, exclude) # init error messages: errors = dict() # run all the model for iterations for fn in folders: # get all files sys.stderr.write("Retrieving files...\n") yaml_path, req_path = get_all_files(folders[fn], dataset, universe=universe) if yaml_path is None: sys.stderr.write(f"There is no {dataset}.yaml file in {folders[fn]}") continue sys.stderr.write("\n") # create env by anaconda temp_dir, env_path, python_path, conda_activate = create_env() # install requirements.txt sys.stderr.write("Installing requirements.txt...\n") with open(req_path) as f: content = f.read() if "torch" in content: # automatically install pytorch according to nvidia's version execute( f"{python_path} -m pip install light-the-torch", wait_when_err=wait_when_err ) # for automatically installing torch according to the nvidia driver execute( f"{env_path / 'bin' / 'ltt'} install --install-cmd '{python_path} -m pip install {{packages}}' -- -r {req_path}", wait_when_err=wait_when_err, ) else: execute(f"{python_path} -m pip install -r {req_path}", wait_when_err=wait_when_err) sys.stderr.write("\n") # read yaml, remove seed kwargs of model, and then save file in the temp_dir yaml_path = gen_yaml_file_without_seed_kwargs(yaml_path, temp_dir) # setup gpu for tft if fn == "TFT": execute( f"conda install -y --prefix {env_path} anaconda cudatoolkit=10.0 && conda install -y --prefix {env_path} cudnn", wait_when_err=wait_when_err, ) sys.stderr.write("\n") # install qlib sys.stderr.write("Installing qlib...\n") execute(f"{python_path} -m pip install --upgrade pip", wait_when_err=wait_when_err) # TODO: FIX ME! execute(f"{python_path} -m pip install --upgrade cython", wait_when_err=wait_when_err) # TODO: FIX ME! if fn == "TFT": execute( f"cd {env_path} && {python_path} -m pip install --upgrade --force-reinstall --ignore-installed PyYAML -e {qlib_uri}", wait_when_err=wait_when_err, ) # TODO: FIX ME! else: execute( f"cd {env_path} && {python_path} -m pip install --upgrade --force-reinstall -e {qlib_uri}", wait_when_err=wait_when_err, ) # TODO: FIX ME! sys.stderr.write("\n") # run workflow_by_config for multiple times for i in range(times): sys.stderr.write(f"Running the model: {fn} for iteration {i+1}...\n") errs = execute( f"{python_path} {env_path / 'bin' / 'qrun'} {yaml_path} {fn} {exp_folder_name}", wait_when_err=wait_when_err, ) if errs is not None: _errs = errors.get(fn, {}) _errs.update({i: errs}) errors[fn] = _errs sys.stderr.write("\n") # remove env sys.stderr.write(f"Deleting the environment: {env_path}...\n") if wait_before_rm_env: input("Press Enter to Continue") shutil.rmtree(env_path) # print errors sys.stderr.write(f"Here are some of the errors of the models...\n") pprint(errors) self._collect_results(exp_folder_name, dataset)
Please be aware that this function can only work under Linux. MacOS and Windows will be supported in the future. Any PR to enhance this method is highly welcomed. Besides, this script doesn't support running the same model multiple times in parallel, and this will be fixed in the future development. Parameters: ----------- times : int determines how many times the model should be run. models : str or list determines the specific model or list of models to run or exclude. exclude : boolean determines whether the model being used is excluded or included. dataset : str determines the dataset to be used for each model. universe : str the stock universe of the dataset. default "" indicates that no universe suffix is appended when choosing the config. qlib_uri : str the uri to install qlib with pip it could be URI on the remote or local path (NOTE: the local path must be an absolute path) exp_folder_name: str the name of the experiment folder wait_before_rm_env : bool wait before removing the environment. wait_when_err : bool wait when errors are raised while executing commands Usage: ------- Here are some use cases of the function in bash: The run_all_models will decide which config to run based on `models` `dataset` `universe` Example 1): models="lightgbm", dataset="Alpha158", universe="" will result in running the following config examples/benchmarks/LightGBM/workflow_config_lightgbm_Alpha158.yaml models="lightgbm", dataset="Alpha158", universe="csi500" will result in running the following config examples/benchmarks/LightGBM/workflow_config_lightgbm_Alpha158_csi500.yaml .. code-block:: bash # Case 1 - run all models multiple times python run_all_model.py run 3 # Case 2 - run specific models multiple times python run_all_model.py run 3 mlp # Case 3 - run specific models multiple times with specific dataset python run_all_model.py run 3 mlp Alpha158 # Case 4 - run all models except those given as arguments, multiple times python run_all_model.py run 3 [mlp,tft,lstm] --exclude=True # Case 5 - run specific models one time python run_all_model.py run --models=[mlp,lightgbm] # Case 6 - run all models except those given as arguments, one time python run_all_model.py run --models=[mlp,tft,sfm] --exclude=True # Case 7 - run lightgbm model on csi500. python run_all_model.py run 3 lightgbm Alpha158 csi500
run
python
microsoft/qlib
examples/run_all_model.py
https://github.com/microsoft/qlib/blob/master/examples/run_all_model.py
MIT
def process_qlib_data(df, dataset, fillna=False): """Prepare data to fit the TFT model. Args: df: Original DataFrame. fillna: Whether to fill the data with the mean values. Returns: Transformed DataFrame. """ # Several features selected manually feature_col = DATASET_SETTING[dataset]["feature_col"] label_col = [DATASET_SETTING[dataset]["label_col"]] temp_df = df.loc[:, feature_col + label_col] if fillna: temp_df = fill_test_na(temp_df) temp_df = temp_df.swaplevel() temp_df = temp_df.sort_index() temp_df = temp_df.reset_index(level=0) dates = pd.to_datetime(temp_df.index) temp_df["date"] = dates temp_df["day_of_week"] = dates.dayofweek temp_df["month"] = dates.month temp_df["year"] = dates.year temp_df["const"] = 1.0 return temp_df
Prepare data to fit the TFT model. Args: df: Original DataFrame. fillna: Whether to fill the data with the mean values. Returns: Transformed DataFrame.
process_qlib_data
python
microsoft/qlib
examples/benchmarks/TFT/tft.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/tft.py
MIT
def process_predicted(df, col_name):
    """Transform the TFT predicted data into Qlib format.
    Args:
        df: Original DataFrame.
        col_name: New column name.
    Returns:
        Transformed DataFrame.
    """
    df_res = df.copy()
    df_res = df_res.rename(columns={"forecast_time": "datetime", "identifier": "instrument", "t+4": col_name})
    df_res = df_res.set_index(["datetime", "instrument"]).sort_index()
    df_res = df_res[[col_name]]
    return df_res
Transform the TFT predicted data into Qlib format. Args: df: Original DataFrame. col_name: New column name. Returns: Transformed DataFrame.
process_predicted
python
microsoft/qlib
examples/benchmarks/TFT/tft.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/tft.py
MIT
def to_pickle(self, path: Union[Path, str]):
    """
    The TensorFlow model can't be dumped directly, so its data should be saved separately.

    **TODO**: Please implement the function to load the files

    Parameters
    ----------
    path : Union[Path, str]
        the target path to be dumped
    """
    # FIXME: implementing saving tensorflow models
    # save tensorflow model
    # path = Path(path)
    # path.mkdir(parents=True)
    # self.model.save(path)

    # save qlib model wrapper
    drop_attrs = ["model", "tf_graph", "sess", "data_formatter"]
    orig_attr = {}
    for attr in drop_attrs:
        orig_attr[attr] = getattr(self, attr)
        setattr(self, attr, None)
    super(TFTModel, self).to_pickle(path)
    for attr in drop_attrs:
        setattr(self, attr, orig_attr[attr])
The TensorFlow model can't be dumped directly, so its data should be saved separately. **TODO**: Please implement the function to load the files Parameters ---------- path : Union[Path, str] the target path to be dumped
to_pickle
python
microsoft/qlib
examples/benchmarks/TFT/tft.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/tft.py
MIT
def get_column_definition(self): """Returns formatted column definition in order expected by the TFT.""" column_definition = self._column_definition # Sanity checks first. # Ensure only one ID and time column exist def _check_single_column(input_type): length = len([tup for tup in column_definition if tup[2] == input_type]) if length != 1: raise ValueError("Illegal number of inputs ({}) of type {}".format(length, input_type)) _check_single_column(InputTypes.ID) _check_single_column(InputTypes.TIME) identifier = [tup for tup in column_definition if tup[2] == InputTypes.ID] time = [tup for tup in column_definition if tup[2] == InputTypes.TIME] real_inputs = [ tup for tup in column_definition if tup[1] == DataTypes.REAL_VALUED and tup[2] not in {InputTypes.ID, InputTypes.TIME} ] categorical_inputs = [ tup for tup in column_definition if tup[1] == DataTypes.CATEGORICAL and tup[2] not in {InputTypes.ID, InputTypes.TIME} ] return identifier + time + real_inputs + categorical_inputs
Returns formatted column definition in order expected by the TFT.
get_column_definition
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/base.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/base.py
MIT
def _get_tft_input_indices(self): """Returns the relevant indexes and input sizes required by TFT.""" # Functions def _extract_tuples_from_data_type(data_type, defn): return [tup for tup in defn if tup[1] == data_type and tup[2] not in {InputTypes.ID, InputTypes.TIME}] def _get_locations(input_types, defn): return [i for i, tup in enumerate(defn) if tup[2] in input_types] # Start extraction column_definition = [ tup for tup in self.get_column_definition() if tup[2] not in {InputTypes.ID, InputTypes.TIME} ] categorical_inputs = _extract_tuples_from_data_type(DataTypes.CATEGORICAL, column_definition) real_inputs = _extract_tuples_from_data_type(DataTypes.REAL_VALUED, column_definition) locations = { "input_size": len(self._get_input_columns()), "output_size": len(_get_locations({InputTypes.TARGET}, column_definition)), "category_counts": self.num_classes_per_cat_input, "input_obs_loc": _get_locations({InputTypes.TARGET}, column_definition), "static_input_loc": _get_locations({InputTypes.STATIC_INPUT}, column_definition), "known_regular_inputs": _get_locations({InputTypes.STATIC_INPUT, InputTypes.KNOWN_INPUT}, real_inputs), "known_categorical_inputs": _get_locations( {InputTypes.STATIC_INPUT, InputTypes.KNOWN_INPUT}, categorical_inputs ), } return locations
Returns the relevant indexes and input sizes required by TFT.
_get_tft_input_indices
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/base.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/base.py
MIT
def get_experiment_params(self): """Returns fixed model parameters for experiments.""" required_keys = [ "total_time_steps", "num_encoder_steps", "num_epochs", "early_stopping_patience", "multiprocessing_workers", ] fixed_params = self.get_fixed_params() for k in required_keys: if k not in fixed_params: raise ValueError("Field {}".format(k) + " missing from fixed parameter definitions!") fixed_params["column_definition"] = self.get_column_definition() fixed_params.update(self._get_tft_input_indices()) return fixed_params
Returns fixed model parameters for experiments.
get_experiment_params
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/base.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/base.py
MIT
def split_data(self, df, valid_boundary=2016, test_boundary=2018): """Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data. """ print("Formatting train-valid-test splits.") index = df["year"] train = df.loc[index < valid_boundary] valid = df.loc[(index >= valid_boundary) & (index < test_boundary)] test = df.loc[index >= test_boundary] self.set_scalers(train) return (self.transform_inputs(data) for data in [train, valid, test])
Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.
split_data
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
MIT
def set_scalers(self, df): """Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers. """ print("Setting scalers with training data...") column_definitions = self.get_column_definition() id_column = utils.get_single_col_by_input_type(InputTypes.ID, column_definitions) target_column = utils.get_single_col_by_input_type(InputTypes.TARGET, column_definitions) # Extract identifiers in case required self.identifiers = list(df[id_column].unique()) # Format real scalers real_inputs = utils.extract_cols_from_data_type( DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME} ) data = df[real_inputs].values self._real_scalers = sklearn.preprocessing.StandardScaler().fit(data) self._target_scaler = sklearn.preprocessing.StandardScaler().fit( df[[target_column]].values ) # used for predictions # Format categorical scalers categorical_inputs = utils.extract_cols_from_data_type( DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME} ) categorical_scalers = {} num_classes = [] for col in categorical_inputs: # Set all to str so that we don't have mixed integer/string columns srs = df[col].apply(str) categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(srs.values) num_classes.append(srs.nunique()) # Set categorical scaler outputs self._cat_scalers = categorical_scalers self._num_classes_per_cat_input = num_classes
Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.
set_scalers
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
MIT
def transform_inputs(self, df):
    """Performs feature transformations.
    This includes feature engineering, preprocessing, and normalisation.
    Args:
        df: Data frame to transform.
    Returns:
        Transformed data frame.
    """
    output = df.copy()

    if self._real_scalers is None and self._cat_scalers is None:
        raise ValueError("Scalers have not been set!")

    column_definitions = self.get_column_definition()

    real_inputs = utils.extract_cols_from_data_type(
        DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME}
    )
    categorical_inputs = utils.extract_cols_from_data_type(
        DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME}
    )

    # Format real inputs
    output[real_inputs] = self._real_scalers.transform(df[real_inputs].values)

    # Format categorical inputs
    for col in categorical_inputs:
        string_df = df[col].apply(str)
        output[col] = self._cat_scalers[col].transform(string_df)

    return output
Performs feature transformations. This includes feature engineering, preprocessing, and normalisation. Args: df: Data frame to transform. Returns: Transformed data frame.
transform_inputs
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
MIT
def format_predictions(self, predictions): """Reverts any normalisation to give predictions in original scale. Args: predictions: Dataframe of model predictions. Returns: Data frame of unnormalised predictions. """ output = predictions.copy() column_names = predictions.columns for col in column_names: if col not in {"forecast_time", "identifier"}: # Using [col] is for aligning with the format when fitting output[col] = self._target_scaler.inverse_transform(predictions[[col]]) return output
Reverts any normalisation to give predictions in original scale. Args: predictions: Dataframe of model predictions. Returns: Data frame of unnormalised predictions.
format_predictions
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
MIT
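The scaler round trip used by set_scalers and format_predictions, in isolation (a sketch with invented data and column names): fit StandardScaler on the training target, then invert model outputs back to the original scale.

import numpy as np
import pandas as pd
import sklearn.preprocessing

train_target = pd.DataFrame({"LABEL0": [0.01, -0.02, 0.03, 0.00]})        # hypothetical target column
target_scaler = sklearn.preprocessing.StandardScaler().fit(train_target[["LABEL0"]].values)

preds = pd.DataFrame({"forecast_time": ["2020-01-02"], "identifier": ["SH600000"], "t+4": [0.8]})
out = preds.copy()
for col in preds.columns:
    if col not in {"forecast_time", "identifier"}:
        out[col] = target_scaler.inverse_transform(preds[[col]]).ravel()  # ravel keeps the column 1-D
print(out)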
def get_fixed_params(self): """Returns fixed model parameters for experiments.""" fixed_params = { "total_time_steps": 6 + 6, "num_encoder_steps": 6, "num_epochs": 100, "early_stopping_patience": 10, "multiprocessing_workers": 5, } return fixed_params
Returns fixed model parameters for experiments.
get_fixed_params
python
microsoft/qlib
examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py
MIT
def __init__(self, experiment="volatility", root_folder=None): """Creates configs based on default experiment chosen. Args: experiment: Name of experiment. root_folder: Root folder to save all outputs of training. """ if experiment not in self.default_experiments: raise ValueError("Unrecognised experiment={}".format(experiment)) # Defines all relevant paths if root_folder is None: root_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "outputs") print("Using root folder {}".format(root_folder)) self.root_folder = root_folder self.experiment = experiment self.data_folder = os.path.join(root_folder, "data", experiment) self.model_folder = os.path.join(root_folder, "saved_models", experiment) self.results_folder = os.path.join(root_folder, "results", experiment) # Creates folders if they don't exist for relevant_directory in [self.root_folder, self.data_folder, self.model_folder, self.results_folder]: if not os.path.exists(relevant_directory): os.makedirs(relevant_directory)
Creates configs based on default experiment chosen. Args: experiment: Name of experiment. root_folder: Root folder to save all outputs of training.
__init__
python
microsoft/qlib
examples/benchmarks/TFT/expt_settings/configs.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/expt_settings/configs.py
MIT
def make_data_formatter(self): """Gets a data formatter object for experiment. Returns: Default DataFormatter per experiment. """ data_formatter_class = { "Alpha158": data_formatters.qlib_Alpha158.Alpha158Formatter, } return data_formatter_class[self.experiment]()
Gets a data formatter object for experiment. Returns: Default DataFormatter per experiment.
make_data_formatter
python
microsoft/qlib
examples/benchmarks/TFT/expt_settings/configs.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/expt_settings/configs.py
MIT
def __init__(self, param_ranges, fixed_params, model_folder, override_w_fixed_params=True):
    """Instantiates model.
    Args:
        param_ranges: Discrete hyperparameter range for random search.
        fixed_params: Fixed model parameters per experiment.
        model_folder: Folder to store optimisation artifacts.
        override_w_fixed_params: Whether to override serialised fixed model parameters with new supplied values.
    """
    self.param_ranges = param_ranges

    self._max_tries = 1000
    self.results = pd.DataFrame()
    self.fixed_params = fixed_params
    self.saved_params = pd.DataFrame()

    self.best_score = np.Inf
    self.optimal_name = ""

    # Setup
    # Create folder for saving if it's not there
    self.hyperparam_folder = model_folder
    utils.create_folder_if_not_exist(self.hyperparam_folder)

    self._override_w_fixed_params = override_w_fixed_params
Instantiates model. Args: param_ranges: Discrete hyperparameter range for random search. fixed_params: Fixed model parameters per experiment. model_folder: Folder to store optimisation artifacts. override_w_fixed_params: Whether to override serialised fixed model parameters with new supplied values.
__init__
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def load_results(self):
    """Loads results from previous hyperparameter optimisation.
    Returns:
        A boolean indicating if previous results can be loaded.
    """
    print("Loading results from", self.hyperparam_folder)

    results_file = os.path.join(self.hyperparam_folder, "results.csv")
    params_file = os.path.join(self.hyperparam_folder, "params.csv")

    if os.path.exists(results_file) and os.path.exists(params_file):
        self.results = pd.read_csv(results_file, index_col=0)
        self.saved_params = pd.read_csv(params_file, index_col=0)

        if not self.results.empty:
            # coerce the loss row to float before comparisons (.loc, since .at needs a row/column pair)
            self.results.loc["loss"] = self.results.loc["loss"].apply(float)
            self.best_score = self.results.loc["loss"].min()

            is_optimal = self.results.loc["loss"] == self.best_score
            self.optimal_name = self.results.T[is_optimal].index[0]

            return True

    return False
Loads results from previous hyperparameter optimisation. Returns: A boolean indicating if previous results can be loaded.
load_results
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def _get_params_from_name(self, name): """Returns previously saved parameters given a key.""" params = self.saved_params selected_params = dict(params[name]) if self._override_w_fixed_params: for k in self.fixed_params: selected_params[k] = self.fixed_params[k] return selected_params
Returns previously saved parameters given a key.
_get_params_from_name
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def clear(self): """Clears all previous results and saved parameters.""" shutil.rmtree(self.hyperparam_folder) os.makedirs(self.hyperparam_folder) self.results = pd.DataFrame() self.saved_params = pd.DataFrame()
Clears all previous results and saved parameters.
clear
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def _check_params(self, params): """Checks that parameter map is properly defined.""" valid_fields = list(self.param_ranges.keys()) + list(self.fixed_params.keys()) invalid_fields = [k for k in params if k not in valid_fields] missing_fields = [k for k in valid_fields if k not in params] if invalid_fields: raise ValueError("Invalid Fields Found {} - Valid ones are {}".format(invalid_fields, valid_fields)) if missing_fields: raise ValueError("Missing Fields Found {} - Valid ones are {}".format(missing_fields, valid_fields))
Checks that parameter map is properly defined.
_check_params
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def _get_name(self, params): """Returns a unique key for the supplied set of params.""" self._check_params(params) fields = list(params.keys()) fields.sort() return "_".join([str(params[k]) for k in fields])
Returns a unique key for the supplied set of params.
_get_name
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def get_next_parameters(self, ranges_to_skip=None): """Returns the next set of parameters to optimise. Args: ranges_to_skip: Explicitly defines a set of keys to skip. """ if ranges_to_skip is None: ranges_to_skip = set(self.results.index) if not isinstance(self.param_ranges, dict): raise ValueError("Only works for random search!") param_range_keys = list(self.param_ranges.keys()) param_range_keys.sort() def _get_next(): """Returns next hyperparameter set per try.""" parameters = {k: np.random.choice(self.param_ranges[k]) for k in param_range_keys} # Adds fixed params for k in self.fixed_params: parameters[k] = self.fixed_params[k] return parameters for _ in range(self._max_tries): parameters = _get_next() name = self._get_name(parameters) if name not in ranges_to_skip: return parameters raise ValueError("Exceeded max number of hyperparameter searches!!")
Returns the next set of parameters to optimise. Args: ranges_to_skip: Explicitly defines a set of keys to skip.
get_next_parameters
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def _get_next(): """Returns next hyperparameter set per try.""" parameters = {k: np.random.choice(self.param_ranges[k]) for k in param_range_keys} # Adds fixed params for k in self.fixed_params: parameters[k] = self.fixed_params[k] return parameters
Returns next hyperparameter set per try.
_get_next
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def update_score(self, parameters, loss, model, info=""):
    """Updates the results from last optimisation run.
    Args:
        parameters: Hyperparameters used in optimisation.
        loss: Validation loss obtained.
        model: Model to be serialised if required.
        info: Any ancillary information to tag on to results.
    Returns:
        Boolean flag indicating if the model is the best seen so far.
    """
    if np.isnan(loss):
        loss = np.Inf

    if not os.path.isdir(self.hyperparam_folder):
        os.makedirs(self.hyperparam_folder)

    name = self._get_name(parameters)

    is_optimal = self.results.empty or loss < self.best_score

    # save the first model
    if is_optimal:
        # Try saving first, before updating info
        if model is not None:
            print("Optimal model found, updating")
            model.save(self.hyperparam_folder)
        self.best_score = loss
        self.optimal_name = name

    self.results[name] = pd.Series({"loss": loss, "info": info})
    self.saved_params[name] = pd.Series(parameters)

    self.results.to_csv(os.path.join(self.hyperparam_folder, "results.csv"))
    self.saved_params.to_csv(os.path.join(self.hyperparam_folder, "params.csv"))

    return is_optimal
Updates the results from last optimisation run. Args: parameters: Hyperparameters used in optimisation. loss: Validation loss obtained. model: Model to be serialised if required. info: Any ancillary information to tag on to results. Returns: Boolean flag indicating if the model is the best seen so far.
update_score
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def __init__( self, param_ranges, fixed_params, root_model_folder, worker_number, search_iterations=1000, num_iterations_per_worker=5, clear_serialised_params=False, ): """Instantiates optimisation manager. This hyperparameter optimisation pre-generates #search_iterations hyperparameter combinations and serialises them at the start. At runtime, each worker goes through their own set of parameter ranges. The pregeneration allows for multiple workers to run in parallel on different machines without resulting in parameter overlaps. Args: param_ranges: Discrete hyperparameter range for random search. fixed_params: Fixed model parameters per experiment. root_model_folder: Folder to store optimisation artifacts. worker_number: Worker index defining which set of hyperparameters to test. search_iterations: Maximum number of random search iterations. num_iterations_per_worker: How many iterations are handled per worker. clear_serialised_params: Whether to regenerate hyperparameter combinations. """ max_workers = int(np.ceil(search_iterations / num_iterations_per_worker)) # Sanity checks if worker_number > max_workers: raise ValueError( "Worker number ({}) cannot be larger than the total number of workers!".format(max_workers) ) if worker_number > search_iterations: raise ValueError( "Worker number ({}) cannot be larger than the max search iterations ({})!".format( worker_number, search_iterations ) ) print("*** Creating hyperparameter manager for worker {} ***".format(worker_number)) hyperparam_folder = os.path.join(root_model_folder, str(worker_number)) super().__init__(param_ranges, fixed_params, hyperparam_folder, override_w_fixed_params=True) serialised_ranges_folder = os.path.join(root_model_folder, "hyperparams") if clear_serialised_params: print("Regenerating hyperparameter list") if os.path.exists(serialised_ranges_folder): shutil.rmtree(serialised_ranges_folder) utils.create_folder_if_not_exist(serialised_ranges_folder) self.serialised_ranges_path = os.path.join(serialised_ranges_folder, "ranges_{}.csv".format(search_iterations)) self.hyperparam_folder = hyperparam_folder # override self.worker_num = worker_number self.total_search_iterations = search_iterations self.num_iterations_per_worker = num_iterations_per_worker self.global_hyperparam_df = self.load_serialised_hyperparam_df() self.worker_search_queue = self._get_worker_search_queue()
Instantiates optimisation manager. This hyperparameter optimisation pre-generates #search_iterations hyperparameter combinations and serialises them at the start. At runtime, each worker goes through their own set of parameter ranges. The pregeneration allows for multiple workers to run in parallel on different machines without resulting in parameter overlaps. Args: param_ranges: Discrete hyperparameter range for random search. fixed_params: Fixed model parameters per experiment. root_model_folder: Folder to store optimisation artifacts. worker_number: Worker index defining which set of hyperparameters to test. search_iterations: Maximum number of random search iterations. num_iterations_per_worker: How many iterations are handled per worker. clear_serialised_params: Whether to regenerate hyperparameter combinations.
__init__
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def get_next_parameters(self): """Returns next dictionary of hyperparameters to optimise.""" param_name = self.worker_search_queue.pop() params = self.global_hyperparam_df.loc[param_name, :].to_dict() # Always override! for k in self.fixed_params: print("Overriding saved {}: {}".format(k, self.fixed_params[k])) params[k] = self.fixed_params[k] return params
Returns next dictionary of hyperparameters to optimise.
get_next_parameters
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def load_serialised_hyperparam_df(self):
    """Loads serialised hyperparameter ranges from file.
    Returns:
        DataFrame containing hyperparameter combinations.
    """
    print(
        "Loading params for {} search iterations from {}".format(
            self.total_search_iterations, self.serialised_ranges_path
        )
    )

    # check the serialised CSV itself (the folder alone is not stored as an attribute)
    if os.path.exists(self.serialised_ranges_path):
        df = pd.read_csv(self.serialised_ranges_path, index_col=0)
    else:
        print("Unable to load - regenerating search ranges instead")
        df = self.update_serialised_hyperparam_df()

    return df
Loads serialised hyperparameter ranges from file. Returns: DataFrame containing hyperparameter combinations.
load_serialised_hyperparam_df
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def update_serialised_hyperparam_df(self): """Regenerates hyperparameter combinations and saves to file. Returns: DataFrame containing hyperparameter combinations. """ search_df = self._generate_full_hyperparam_df() print( "Serialising params for {} search iterations to {}".format( self.total_search_iterations, self.serialised_ranges_path ) ) search_df.to_csv(self.serialised_ranges_path) return search_df
Regenerates hyperparameter combinations and saves to file. Returns: DataFrame containing hyperparameter combinations.
update_serialised_hyperparam_df
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def _generate_full_hyperparam_df(self): """Generates actual hyperparameter combinations. Returns: DataFrame containing hyperparameter combinations. """ np.random.seed(131) # for reproducibility of hyperparam list name_list = [] param_list = [] for _ in range(self.total_search_iterations): params = super().get_next_parameters(name_list) name = self._get_name(params) name_list.append(name) param_list.append(params) full_search_df = pd.DataFrame(param_list, index=name_list) return full_search_df
Generates actual hyperparameter combinations. Returns: DataFrame containing hyperparameter combinations.
_generate_full_hyperparam_df
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def clear(self): # reset when cleared """Clears results for hyperparameter manager and resets.""" super().clear() self.worker_search_queue = self._get_worker_search_queue()
Clears results for hyperparameter manager and resets.
clear
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def load_results(self): """Load results from file and queue parameter combinations to try. Returns: Boolean indicating if results were successfully loaded. """ success = super().load_results() if success: self.worker_search_queue = self._get_worker_search_queue() return success
Load results from file and queue parameter combinations to try. Returns: Boolean indicating if results were successfully loaded.
load_results
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def _get_worker_search_queue(self): """Generates the queue of param combinations for current worker. Returns: Queue of hyperparameter combinations outstanding. """ global_df = self.assign_worker_numbers(self.global_hyperparam_df) worker_df = global_df[global_df["worker"] == self.worker_num] left_overs = [s for s in worker_df.index if s not in self.results.columns] return Deque(left_overs)
Generates the queue of param combinations for current worker. Returns: Queue of hyperparameter combinations outstanding.
_get_worker_search_queue
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
def assign_worker_numbers(self, df): """Updates parameter combinations with the index of the worker used. Args: df: DataFrame of parameter combinations. Returns: Updated DataFrame with worker number. """ output = df.copy() n = self.total_search_iterations batch_size = self.num_iterations_per_worker max_worker_num = int(np.ceil(n / batch_size)) worker_idx = np.concatenate([np.tile(i + 1, self.num_iterations_per_worker) for i in range(max_worker_num)]) output["worker"] = worker_idx[: len(output)] return output
Updates parameter combinations with the index of the worker used. Args: df: DataFrame of parameter combinations. Returns: Updated DataFrame with worker number.
assign_worker_numbers
python
microsoft/qlib
examples/benchmarks/TFT/libs/hyperparam_opt.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/hyperparam_opt.py
MIT
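The worker-assignment arithmetic above can be checked standalone: 10 combinations split into batches of 4 yield ceil(10/4) = 3 workers, and the surplus from the last tile is trimmed by the trailing slice.

import numpy as np

n, batch_size = 10, 4
max_worker_num = int(np.ceil(n / batch_size))
worker_idx = np.concatenate([np.tile(i + 1, batch_size) for i in range(max_worker_num)])
print(worker_idx[:n])  # [1 1 1 1 2 2 2 2 3 3]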
def linear_layer(size, activation=None, use_time_distributed=False, use_bias=True): """Returns simple Keras linear layer. Args: size: Output size activation: Activation function to apply if required use_time_distributed: Whether to apply layer across time use_bias: Whether bias should be included in layer """ linear = tf.keras.layers.Dense(size, activation=activation, use_bias=use_bias) if use_time_distributed: linear = tf.keras.layers.TimeDistributed(linear) return linear
Returns simple Keras linear layer. Args: size: Output size activation: Activation function to apply if required use_time_distributed: Whether to apply layer across time use_bias: Whether bias should be included in layer
linear_layer
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def apply_mlp(
    inputs, hidden_size, output_size, output_activation=None, hidden_activation="tanh", use_time_distributed=False
):
    """Applies simple feed-forward network to an input.

    Args:
      inputs: MLP inputs
      hidden_size: Hidden state size
      output_size: Output size of MLP
      output_activation: Activation function to apply on output
      hidden_activation: Activation function to apply on the hidden layer
      use_time_distributed: Whether to apply across time

    Returns:
      Tensor for MLP outputs.
    """
    if use_time_distributed:
        hidden = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(hidden_size, activation=hidden_activation))(
            inputs
        )
        return tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(output_size, activation=output_activation))(hidden)
    else:
        hidden = tf.keras.layers.Dense(hidden_size, activation=hidden_activation)(inputs)
        return tf.keras.layers.Dense(output_size, activation=output_activation)(hidden)
Applies simple feed-forward network to an input. Args: inputs: MLP inputs hidden_size: Hidden state size output_size: Output size of MLP output_activation: Activation function to apply on output hidden_activation: Activation function to apply on the hidden layer use_time_distributed: Whether to apply across time Returns: Tensor for MLP outputs.
apply_mlp
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def apply_gating_layer(x, hidden_layer_size, dropout_rate=None, use_time_distributed=True, activation=None): """Applies a Gated Linear Unit (GLU) to an input. Args: x: Input to gating layer hidden_layer_size: Dimension of GLU dropout_rate: Dropout rate to apply if any use_time_distributed: Whether to apply across time activation: Activation function to apply to the linear feature transform if necessary Returns: Tuple of tensors for: (GLU output, gate) """ if dropout_rate is not None: x = tf.keras.layers.Dropout(dropout_rate)(x) if use_time_distributed: activation_layer = tf.keras.layers.TimeDistributed( tf.keras.layers.Dense(hidden_layer_size, activation=activation) )(x) gated_layer = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(hidden_layer_size, activation="sigmoid"))(x) else: activation_layer = tf.keras.layers.Dense(hidden_layer_size, activation=activation)(x) gated_layer = tf.keras.layers.Dense(hidden_layer_size, activation="sigmoid")(x) return tf.keras.layers.Multiply()([activation_layer, gated_layer]), gated_layer
Applies a Gated Linear Unit (GLU) to an input. Args: x: Input to gating layer hidden_layer_size: Dimension of GLU dropout_rate: Dropout rate to apply if any use_time_distributed: Whether to apply across time activation: Activation function to apply to the linear feature transform if necessary Returns: Tuple of tensors for: (GLU output, gate)
apply_gating_layer
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
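The GLU arithmetic this layer wires up can be checked standalone. A minimal NumPy sketch with random stand-in weights (no Keras layers, dropout omitted): two linear maps of the same input, one squashed through a sigmoid and used as an elementwise gate on the other.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 8))                  # (batch, features)
w_a, w_g = rng.normal(size=(8, 4)), rng.normal(size=(8, 4))

activation = x @ w_a                         # linear feature transform
gate = 1.0 / (1.0 + np.exp(-(x @ w_g)))      # sigmoid gate in [0, 1]
glu_out = activation * gate                  # gate suppresses unneeded features
print(glu_out.shape)                         # (2, 4)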
def add_and_norm(x_list): """Applies skip connection followed by layer normalisation. Args: x_list: List of inputs to sum for skip connection Returns: Tensor output from layer. """ tmp = Add()(x_list) tmp = LayerNorm()(tmp) return tmp
Applies skip connection followed by layer normalisation. Args: x_list: List of inputs to sum for skip connection Returns: Tensor output from layer.
add_and_norm
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def gated_residual_network( x, hidden_layer_size, output_size=None, dropout_rate=None, use_time_distributed=True, additional_context=None, return_gate=False, ): """Applies the gated residual network (GRN) as defined in paper. Args: x: Network inputs hidden_layer_size: Internal state size output_size: Size of output layer dropout_rate: Dropout rate if dropout is applied use_time_distributed: Whether to apply network across time dimension additional_context: Additional context vector to use if relevant return_gate: Whether to return GLU gate for diagnostic purposes Returns: Tuple of tensors for: (GRN output, GLU gate) """ # Setup skip connection if output_size is None: output_size = hidden_layer_size skip = x else: linear = Dense(output_size) if use_time_distributed: linear = tf.keras.layers.TimeDistributed(linear) skip = linear(x) # Apply feedforward network hidden = linear_layer(hidden_layer_size, activation=None, use_time_distributed=use_time_distributed)(x) if additional_context is not None: hidden = hidden + linear_layer( hidden_layer_size, activation=None, use_time_distributed=use_time_distributed, use_bias=False )(additional_context) hidden = tf.keras.layers.Activation("elu")(hidden) hidden = linear_layer(hidden_layer_size, activation=None, use_time_distributed=use_time_distributed)(hidden) gating_layer, gate = apply_gating_layer( hidden, output_size, dropout_rate=dropout_rate, use_time_distributed=use_time_distributed, activation=None ) if return_gate: return add_and_norm([skip, gating_layer]), gate else: return add_and_norm([skip, gating_layer])
Applies the gated residual network (GRN) as defined in paper. Args: x: Network inputs hidden_layer_size: Internal state size output_size: Size of output layer dropout_rate: Dropout rate if dropout is applied use_time_distributed: Whether to apply network across time dimension additional_context: Additional context vector to use if relevant return_gate: Whether to return GLU gate for diagnostic purposes Returns: Tuple of tensors for: (GRN output, GLU gate)
gated_residual_network
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
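A minimal NumPy walk-through of the GRN data flow above (skip path, ELU nonlinearity, GLU gate, add-and-norm), with random weights standing in for the trained Dense layers and no context vector, so the skip is the raw input:

import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=(2, 8))
w1, w2, wa, wg = (rng.normal(size=(8, 8)) for _ in range(4))

hidden = x @ w1
hidden = np.where(hidden > 0, hidden, np.exp(hidden) - 1.0)      # ELU
hidden = hidden @ w2
gated = (hidden @ wa) * (1.0 / (1.0 + np.exp(-(hidden @ wg))))   # GLU

summed = x + gated  # skip connection (output_size == input size here)
out = (summed - summed.mean(-1, keepdims=True)) / (summed.std(-1, keepdims=True) + 1e-6)
print(out.shape)    # (2, 8)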
def get_decoder_mask(self_attn_inputs): """Returns causal mask to apply for self-attention layer. Args: self_attn_inputs: Inputs to self attention layer to determine mask shape """ len_s = tf.shape(self_attn_inputs)[1] bs = tf.shape(self_attn_inputs)[:1] mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1) return mask
Returns causal mask to apply for self-attention layer. Args: self_attn_inputs: Inputs to self attention layer to determine mask shape
get_decoder_mask
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
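Why the cumulative sum of an identity matrix works: cumsum along the row axis turns eye into a lower-triangular ones mask, so position t may attend only to positions <= t. Verified standalone in NumPy (batch dimension dropped for brevity):

import numpy as np

len_s = 4
mask = np.cumsum(np.eye(len_s), axis=0)
print(mask)
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]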
def __call__(self, q, k, v, mask):
    """Applies scaled dot product attention.

    Args:
      q: Queries
      k: Keys
      v: Values
      mask: Masking if required -- sets masked logits to a large negative
        value so they vanish after the softmax

    Returns:
      Tuple of (layer outputs, attention weights)
    """
    temper = tf.sqrt(tf.cast(tf.shape(k)[-1], dtype="float32"))
    attn = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]) / temper)([q, k])  # shape=(batch, q, k)
    if mask is not None:
        mmask = Lambda(lambda x: (-1e9) * (1.0 - K.cast(x, "float32")))(mask)  # push masked logits towards -inf
        attn = Add()([attn, mmask])
    attn = self.activation(attn)
    attn = self.dropout(attn)
    output = Lambda(lambda x: K.batch_dot(x[0], x[1]))([attn, v])
    return output, attn
Applies scaled dot product attention. Args: q: Queries k: Keys v: Values mask: Masking if required -- sets masked logits to a large negative value so they vanish after the softmax Returns: Tuple of (layer outputs, attention weights)
__call__
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
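A minimal NumPy re-derivation of the same computation without the Keras Lambda wrappers: scale by sqrt(d_k), add -1e9 where the mask is 0 so those logits vanish after softmax, then weight the values.

import numpy as np

rng = np.random.default_rng(2)
T, d = 4, 8
q, k, v = (rng.normal(size=(T, d)) for _ in range(3))
mask = np.tril(np.ones((T, T)))                   # causal mask as above

logits = (q @ k.T) / np.sqrt(d)
logits = logits + (-1e9) * (1.0 - mask)           # kill future positions
attn = np.exp(logits - logits.max(-1, keepdims=True))
attn = attn / attn.sum(-1, keepdims=True)         # row-wise softmax
out = attn @ v
print(out.shape, np.allclose(attn.sum(-1), 1.0))  # (4, 8) True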
def __init__(self, n_head, d_model, dropout): """Initialises layer. Args: n_head: Number of heads d_model: TFT state dimensionality dropout: Dropout discard rate """ self.n_head = n_head self.d_k = self.d_v = d_k = d_v = d_model // n_head self.dropout = dropout self.qs_layers = [] self.ks_layers = [] self.vs_layers = [] # Use same value layer to facilitate interp vs_layer = Dense(d_v, use_bias=False) for _ in range(n_head): self.qs_layers.append(Dense(d_k, use_bias=False)) self.ks_layers.append(Dense(d_k, use_bias=False)) self.vs_layers.append(vs_layer) # use same vs_layer self.attention = ScaledDotProductAttention() self.w_o = Dense(d_model, use_bias=False)
Initialises layer. Args: n_head: Number of heads d_model: TFT state dimensionality dropout: Dropout discard rate
__init__
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def __call__(self, q, k, v, mask=None): """Applies interpretable multihead attention. Using T to denote the number of time steps fed into the transformer. Args: q: Query tensor of shape=(?, T, d_model) k: Key of shape=(?, T, d_model) v: Values of shape=(?, T, d_model) mask: Masking if required with shape=(?, T, T) Returns: Tuple of (layer outputs, attention weights) """ n_head = self.n_head heads = [] attns = [] for i in range(n_head): qs = self.qs_layers[i](q) ks = self.ks_layers[i](k) vs = self.vs_layers[i](v) head, attn = self.attention(qs, ks, vs, mask) head_dropout = Dropout(self.dropout)(head) heads.append(head_dropout) attns.append(attn) head = K.stack(heads) if n_head > 1 else heads[0] attn = K.stack(attns) outputs = K.mean(head, axis=0) if n_head > 1 else head outputs = self.w_o(outputs) outputs = Dropout(self.dropout)(outputs) # output dropout return outputs, attn
Applies interpretable multihead attention. Using T to denote the number of time steps fed into the transformer. Args: q: Query tensor of shape=(?, T, d_model) k: Key of shape=(?, T, d_model) v: Values of shape=(?, T, d_model) mask: Masking if required with shape=(?, T, T) Returns: Tuple of (layer outputs, attention weights)
__call__
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
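What makes this attention "interpretable" is the design choice above: all heads share one value projection and their outputs are averaged rather than concatenated, so the per-head attention matrices are directly comparable. An illustrative NumPy sketch of just that aggregation step, with random attention rows standing in for trained heads:

import numpy as np

rng = np.random.default_rng(3)
n_head, T, d_v = 4, 5, 8
shared_values = rng.normal(size=(T, d_v))            # one shared vs_layer output
attns = rng.dirichlet(np.ones(T), size=(n_head, T))  # per-head attention rows

heads = attns @ shared_values                        # (n_head, T, d_v)
outputs = heads.mean(axis=0)                         # average, not concat
print(outputs.shape)                                 # (5, 8)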
def __init__(self, raw_params, use_cudnn=False): """Builds TFT from parameters. Args: raw_params: Parameters to define TFT use_cudnn: Whether to use CUDNN GPU optimised LSTM """ self.name = self.__class__.__name__ params = dict(raw_params) # copy locally # Data parameters self.time_steps = int(params["total_time_steps"]) self.input_size = int(params["input_size"]) self.output_size = int(params["output_size"]) self.category_counts = json.loads(str(params["category_counts"])) self.n_multiprocessing_workers = int(params["multiprocessing_workers"]) # Relevant indices for TFT self._input_obs_loc = json.loads(str(params["input_obs_loc"])) self._static_input_loc = json.loads(str(params["static_input_loc"])) self._known_regular_input_idx = json.loads(str(params["known_regular_inputs"])) self._known_categorical_input_idx = json.loads(str(params["known_categorical_inputs"])) self.column_definition = params["column_definition"] # Network params self.quantiles = [0.1, 0.5, 0.9] self.use_cudnn = use_cudnn # Whether to use GPU optimised LSTM self.hidden_layer_size = int(params["hidden_layer_size"]) self.dropout_rate = float(params["dropout_rate"]) self.max_gradient_norm = float(params["max_gradient_norm"]) self.learning_rate = float(params["learning_rate"]) self.minibatch_size = int(params["minibatch_size"]) self.num_epochs = int(params["num_epochs"]) self.early_stopping_patience = int(params["early_stopping_patience"]) self.num_encoder_steps = int(params["num_encoder_steps"]) self.num_stacks = int(params["stack_size"]) self.num_heads = int(params["num_heads"]) # Serialisation options self._temp_folder = os.path.join(params["model_folder"], "tmp") self.reset_temp_folder() # Extra components to store Tensorflow nodes for attention computations self._input_placeholder = None self._attention_components = None self._prediction_parts = None print("*** {} params ***".format(self.name)) for k in params: print("# {} = {}".format(k, params[k])) # Build model self.model = self.build_model()
Builds TFT from parameters. Args: raw_params: Parameters to define TFT use_cudnn: Whether to use CUDNN GPU optimised LSTM
__init__
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
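An illustrative raw_params dict covering every key the constructor above reads; the values are placeholders for the sketch, not a recommended configuration (the real values come from the experiment's data formatter and hyperparameter search).

example_params = {
    "total_time_steps": 252,
    "input_size": 10,
    "output_size": 1,
    "category_counts": "[2]",          # parsed with json.loads
    "multiprocessing_workers": 4,
    "input_obs_loc": "[0]",
    "static_input_loc": "[9]",
    "known_regular_inputs": "[8]",
    "known_categorical_inputs": "[0]",
    "column_definition": [],           # supplied by the data formatter
    "hidden_layer_size": 160,
    "dropout_rate": 0.3,
    "max_gradient_norm": 0.01,
    "learning_rate": 0.001,
    "minibatch_size": 64,
    "num_epochs": 100,
    "early_stopping_patience": 5,
    "num_encoder_steps": 168,
    "stack_size": 1,
    "num_heads": 4,
    "model_folder": "/tmp/tft",
}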
def get_tft_embeddings(self, all_inputs):
    """Transforms raw inputs to embeddings.

    Applies linear transformation onto continuous variables and uses embeddings
    for categorical variables.

    Args:
      all_inputs: Inputs to transform

    Returns:
      Tensors for transformed inputs.
    """
    time_steps = self.time_steps

    # Sanity checks
    for i in self._known_regular_input_idx:
        if i in self._input_obs_loc:
            raise ValueError("Observation cannot be known a priori!")
    for i in self._input_obs_loc:
        if i in self._static_input_loc:
            raise ValueError("Observation cannot be static!")

    if all_inputs.get_shape().as_list()[-1] != self.input_size:
        raise ValueError(
            "Illegal number of inputs! Inputs observed={}, expected={}".format(
                all_inputs.get_shape().as_list()[-1], self.input_size
            )
        )

    num_categorical_variables = len(self.category_counts)
    num_regular_variables = self.input_size - num_categorical_variables

    embedding_sizes = [self.hidden_layer_size for i, size in enumerate(self.category_counts)]

    embeddings = []
    for i in range(num_categorical_variables):
        embedding = tf.keras.Sequential(
            [
                tf.keras.layers.InputLayer([time_steps]),
                tf.keras.layers.Embedding(
                    self.category_counts[i], embedding_sizes[i], input_length=time_steps, dtype=tf.float32
                ),
            ]
        )
        embeddings.append(embedding)

    regular_inputs, categorical_inputs = (
        all_inputs[:, :, :num_regular_variables],
        all_inputs[:, :, num_regular_variables:],
    )

    embedded_inputs = [embeddings[i](categorical_inputs[Ellipsis, i]) for i in range(num_categorical_variables)]

    # Static inputs
    if self._static_input_loc:
        static_inputs = [
            tf.keras.layers.Dense(self.hidden_layer_size)(regular_inputs[:, 0, i : i + 1])
            for i in range(num_regular_variables)
            if i in self._static_input_loc
        ] + [
            embedded_inputs[i][:, 0, :]
            for i in range(num_categorical_variables)
            if i + num_regular_variables in self._static_input_loc
        ]
        static_inputs = tf.keras.backend.stack(static_inputs, axis=1)
    else:
        static_inputs = None

    def convert_real_to_embedding(x):
        """Applies linear transformation for time-varying inputs."""
        return tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(self.hidden_layer_size))(x)

    # Targets
    obs_inputs = tf.keras.backend.stack(
        [convert_real_to_embedding(regular_inputs[Ellipsis, i : i + 1]) for i in self._input_obs_loc], axis=-1
    )

    # Observed (a priori unknown) inputs
    wired_embeddings = []
    for i in range(num_categorical_variables):
        if i not in self._known_categorical_input_idx and i + num_regular_variables not in self._input_obs_loc:
            e = embeddings[i](categorical_inputs[:, :, i])
            wired_embeddings.append(e)

    unknown_inputs = []
    for i in range(regular_inputs.shape[-1]):
        if i not in self._known_regular_input_idx and i not in self._input_obs_loc:
            e = convert_real_to_embedding(regular_inputs[Ellipsis, i : i + 1])
            unknown_inputs.append(e)

    if unknown_inputs + wired_embeddings:
        unknown_inputs = tf.keras.backend.stack(unknown_inputs + wired_embeddings, axis=-1)
    else:
        unknown_inputs = None

    # A priori known inputs
    known_regular_inputs = [
        convert_real_to_embedding(regular_inputs[Ellipsis, i : i + 1])
        for i in self._known_regular_input_idx
        if i not in self._static_input_loc
    ]
    known_categorical_inputs = [
        embedded_inputs[i]
        for i in self._known_categorical_input_idx
        if i + num_regular_variables not in self._static_input_loc
    ]

    known_combined_layer = tf.keras.backend.stack(known_regular_inputs + known_categorical_inputs, axis=-1)

    return unknown_inputs, known_combined_layer, obs_inputs, static_inputs
Transforms raw inputs to embeddings. Applies linear transformation onto continuous variables and uses embeddings for categorical variables. Args: all_inputs: Inputs to transform Returns: Tensors for transformed inputs.
get_tft_embeddings
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def cache_batched_data(self, data, cache_key, num_samples=-1):
    """Batches and caches data once for use during training.

    Args:
      data: Data to batch and cache
      cache_key: Key used for cache
      num_samples: Maximum number of samples to extract (-1 to use all data)
    """
    if num_samples > 0:
        TFTDataCache.update(self._batch_sampled_data(data, max_samples=num_samples), cache_key)
    else:
        TFTDataCache.update(self._batch_data(data), cache_key)

    print('Cached data "{}" updated'.format(cache_key))
Batches and caches data once for use during training. Args: data: Data to batch and cache cache_key: Key used for cache num_samples: Maximum number of samples to extract (-1 to use all data)
cache_batched_data
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def _batch_sampled_data(self, data, max_samples):
    """Samples segments into a compatible format.

    Args:
      data: Source data to sample and batch
      max_samples: Maximum number of samples in batch

    Returns:
      Dictionary of batched data with the maximum samples specified.
    """
    if max_samples < 1:
        raise ValueError("Illegal number of samples specified! samples={}".format(max_samples))

    id_col = self._get_single_col_by_type(InputTypes.ID)
    time_col = self._get_single_col_by_type(InputTypes.TIME)
    target_col = self._get_single_col_by_type(InputTypes.TARGET)

    data.sort_values(by=[id_col, time_col], inplace=True)

    print("Getting valid sampling locations.")
    valid_sampling_locations = []
    split_data_map = {}
    for identifier, df in data.groupby(id_col, group_keys=False):
        print("Getting locations for {}".format(identifier))
        num_entries = len(df)
        if num_entries >= self.time_steps:
            valid_sampling_locations += [
                (identifier, self.time_steps + i) for i in range(num_entries - self.time_steps + 1)
            ]
        split_data_map[identifier] = df

    inputs = np.zeros((max_samples, self.time_steps, self.input_size))
    outputs = np.zeros((max_samples, self.time_steps, self.output_size))
    time = np.empty((max_samples, self.time_steps, 1), dtype=object)
    identifiers = np.empty((max_samples, self.time_steps, 1), dtype=object)

    if max_samples > 0 and len(valid_sampling_locations) > max_samples:
        print("Extracting {} samples...".format(max_samples))
        ranges = [
            valid_sampling_locations[i]
            for i in np.random.choice(len(valid_sampling_locations), max_samples, replace=False)
        ]
    else:
        print("Max samples={} exceeds # available segments={}".format(max_samples, len(valid_sampling_locations)))
        ranges = valid_sampling_locations

    input_cols = [tup[0] for tup in self.column_definition if tup[2] not in {InputTypes.ID, InputTypes.TIME}]

    for i, tup in enumerate(ranges):
        if ((i + 1) % 1000) == 0:  # progress report every 1000 samples
            print(i + 1, "of", max_samples, "samples done...")
        identifier, start_idx = tup
        sliced = split_data_map[identifier].iloc[start_idx - self.time_steps : start_idx]
        inputs[i, :, :] = sliced[input_cols]
        outputs[i, :, :] = sliced[[target_col]]
        time[i, :, 0] = sliced[time_col]
        identifiers[i, :, 0] = sliced[id_col]

    sampled_data = {
        "inputs": inputs,
        "outputs": outputs[:, self.num_encoder_steps :, :],
        "active_entries": np.ones_like(outputs[:, self.num_encoder_steps :, :]),
        "time": time,
        "identifier": identifiers,
    }

    return sampled_data
Samples segments into a compatible format. Args: data: Source data to sample and batch max_samples: Maximum number of samples in batch Returns: Dictionary of batched data with the maximum samples specified.
_batch_sampled_data
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
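The valid-location arithmetic above can be verified standalone: an entity with num_entries rows and window length time_steps contributes num_entries - time_steps + 1 windows, each indexed by its end position so that iloc[end - time_steps : end] recovers the window.

time_steps, num_entries = 5, 8
valid_ends = [time_steps + i for i in range(num_entries - time_steps + 1)]
print(valid_ends)  # [5, 6, 7, 8] -> windows [0:5], [1:6], [2:7], [3:8]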
def _batch_data(self, data):
    """Batches data for training.

    Converts raw dataframe from a 2-D tabular format to a batched 3-D array
    to feed into Keras model.

    Args:
      data: DataFrame to batch

    Returns:
      Dictionary of batched data arrays, with inputs of
      shape=(?, self.time_steps, self.input_size).
    """

    # Functions.
    def _batch_single_entity(input_data):
        time_steps = len(input_data)
        lags = self.time_steps
        x = input_data.values
        if time_steps >= lags:
            return np.stack([x[i : time_steps - (lags - 1) + i, :] for i in range(lags)], axis=1)
        else:
            return None

    id_col = self._get_single_col_by_type(InputTypes.ID)
    time_col = self._get_single_col_by_type(InputTypes.TIME)
    target_col = self._get_single_col_by_type(InputTypes.TARGET)

    input_cols = [tup[0] for tup in self.column_definition if tup[2] not in {InputTypes.ID, InputTypes.TIME}]

    data_map = {}
    for _, sliced in data.groupby(id_col, group_keys=False):
        col_mappings = {"identifier": [id_col], "time": [time_col], "outputs": [target_col], "inputs": input_cols}

        for k in col_mappings:
            cols = col_mappings[k]
            arr = _batch_single_entity(sliced[cols].copy())

            if k not in data_map:
                data_map[k] = [arr]
            else:
                data_map[k].append(arr)

    # Combine all data
    for k in data_map:
        # Wendi: Avoid returning None when the length is not enough
        data_map[k] = np.concatenate([i for i in data_map[k] if i is not None], axis=0)

    # Shorten target so we only get decoder steps
    data_map["outputs"] = data_map["outputs"][:, self.num_encoder_steps :, :]

    # Mark all decoder steps as active; this is the first assignment of the key.
    data_map["active_entries"] = np.ones_like(data_map["outputs"])

    return data_map
Batches data for training. Converts raw dataframe from a 2-D tabular format to a batched 3-D array to feed into Keras model. Args: data: DataFrame to batch Returns: Dictionary of batched data arrays, with inputs of shape=(?, self.time_steps, self.input_size)
_batch_data
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
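The np.stack trick in _batch_single_entity, checked standalone: shifting a (time_steps, features) array by each lag and stacking on axis 1 yields all overlapping windows at once, with shape (time_steps - lags + 1, lags, features).

import numpy as np

x = np.arange(12, dtype=float).reshape(6, 2)  # 6 steps, 2 features
lags = 4
windows = np.stack([x[i : 6 - (lags - 1) + i, :] for i in range(lags)], axis=1)
print(windows.shape)     # (3, 4, 2)
print(windows[0, :, 0])  # [0. 2. 4. 6.] -> first window of feature 0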
def _build_base_graph(self):
    """Returns graph defining layers of the TFT."""

    # Size definitions.
    time_steps = self.time_steps
    combined_input_size = self.input_size
    encoder_steps = self.num_encoder_steps

    # Inputs.
    all_inputs = tf.keras.layers.Input(
        shape=(
            time_steps,
            combined_input_size,
        )
    )

    unknown_inputs, known_combined_layer, obs_inputs, static_inputs = self.get_tft_embeddings(all_inputs)

    # Isolate known and observed historical inputs.
    if unknown_inputs is not None:
        historical_inputs = concat(
            [
                unknown_inputs[:, :encoder_steps, :],
                known_combined_layer[:, :encoder_steps, :],
                obs_inputs[:, :encoder_steps, :],
            ],
            axis=-1,
        )
    else:
        historical_inputs = concat(
            [known_combined_layer[:, :encoder_steps, :], obs_inputs[:, :encoder_steps, :]], axis=-1
        )

    # Isolate only known future inputs.
    future_inputs = known_combined_layer[:, encoder_steps:, :]

    def static_combine_and_mask(embedding):
        """Applies variable selection network to static inputs.

        Args:
          embedding: Transformed static inputs

        Returns:
          Tensor output for variable selection network
        """

        # Add temporal features
        _, num_static, _ = embedding.get_shape().as_list()

        flatten = tf.keras.layers.Flatten()(embedding)

        # Nonlinear transformation with gated residual network.
        mlp_outputs = gated_residual_network(
            flatten,
            self.hidden_layer_size,
            output_size=num_static,
            dropout_rate=self.dropout_rate,
            use_time_distributed=False,
            additional_context=None,
        )

        sparse_weights = tf.keras.layers.Activation("softmax")(mlp_outputs)
        sparse_weights = K.expand_dims(sparse_weights, axis=-1)

        trans_emb_list = []
        for i in range(num_static):
            e = gated_residual_network(
                embedding[:, i : i + 1, :],
                self.hidden_layer_size,
                dropout_rate=self.dropout_rate,
                use_time_distributed=False,
            )
            trans_emb_list.append(e)

        transformed_embedding = concat(trans_emb_list, axis=1)

        combined = tf.keras.layers.Multiply()([sparse_weights, transformed_embedding])

        static_vec = K.sum(combined, axis=1)

        return static_vec, sparse_weights

    static_encoder, static_weights = static_combine_and_mask(static_inputs)

    static_context_variable_selection = gated_residual_network(
        static_encoder, self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=False
    )
    static_context_enrichment = gated_residual_network(
        static_encoder, self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=False
    )
    static_context_state_h = gated_residual_network(
        static_encoder, self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=False
    )
    static_context_state_c = gated_residual_network(
        static_encoder, self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=False
    )

    def lstm_combine_and_mask(embedding):
        """Apply temporal variable selection networks.

        Args:
          embedding: Transformed inputs.

        Returns:
          Processed tensor outputs.
        """

        # Add temporal features
        _, time_steps, embedding_dim, num_inputs = embedding.get_shape().as_list()

        flatten = K.reshape(embedding, [-1, time_steps, embedding_dim * num_inputs])

        expanded_static_context = K.expand_dims(static_context_variable_selection, axis=1)

        # Variable selection weights
        mlp_outputs, static_gate = gated_residual_network(
            flatten,
            self.hidden_layer_size,
            output_size=num_inputs,
            dropout_rate=self.dropout_rate,
            use_time_distributed=True,
            additional_context=expanded_static_context,
            return_gate=True,
        )

        sparse_weights = tf.keras.layers.Activation("softmax")(mlp_outputs)
        sparse_weights = tf.expand_dims(sparse_weights, axis=2)

        # Non-linear Processing & weight application
        trans_emb_list = []
        for i in range(num_inputs):
            grn_output = gated_residual_network(
                embedding[Ellipsis, i],
                self.hidden_layer_size,
                dropout_rate=self.dropout_rate,
                use_time_distributed=True,
            )
            trans_emb_list.append(grn_output)

        transformed_embedding = stack(trans_emb_list, axis=-1)

        combined = tf.keras.layers.Multiply()([sparse_weights, transformed_embedding])
        temporal_ctx = K.sum(combined, axis=-1)

        return temporal_ctx, sparse_weights, static_gate

    historical_features, historical_flags, _ = lstm_combine_and_mask(historical_inputs)
    future_features, future_flags, _ = lstm_combine_and_mask(future_inputs)

    # LSTM layer
    def get_lstm(return_state):
        """Returns LSTM cell initialized with default parameters."""
        if self.use_cudnn:
            lstm = tf.keras.layers.CuDNNLSTM(
                self.hidden_layer_size,
                return_sequences=True,
                return_state=return_state,
                stateful=False,
            )
        else:
            lstm = tf.keras.layers.LSTM(
                self.hidden_layer_size,
                return_sequences=True,
                return_state=return_state,
                stateful=False,
                # Additional params to ensure LSTM matches CuDNN, See TF 2.0 :
                # (https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM)
                activation="tanh",
                recurrent_activation="sigmoid",
                recurrent_dropout=0,
                unroll=False,
                use_bias=True,
            )
        return lstm

    history_lstm, state_h, state_c = get_lstm(return_state=True)(
        historical_features, initial_state=[static_context_state_h, static_context_state_c]
    )
    future_lstm = get_lstm(return_state=False)(future_features, initial_state=[state_h, state_c])

    lstm_layer = concat([history_lstm, future_lstm], axis=1)

    # Apply gated skip connection
    input_embeddings = concat([historical_features, future_features], axis=1)

    lstm_layer, _ = apply_gating_layer(lstm_layer, self.hidden_layer_size, self.dropout_rate, activation=None)
    temporal_feature_layer = add_and_norm([lstm_layer, input_embeddings])

    # Static enrichment layers
    expanded_static_context = K.expand_dims(static_context_enrichment, axis=1)
    enriched, _ = gated_residual_network(
        temporal_feature_layer,
        self.hidden_layer_size,
        dropout_rate=self.dropout_rate,
        use_time_distributed=True,
        additional_context=expanded_static_context,
        return_gate=True,
    )

    # Decoder self attention
    self_attn_layer = InterpretableMultiHeadAttention(
        self.num_heads, self.hidden_layer_size, dropout=self.dropout_rate
    )
    mask = get_decoder_mask(enriched)
    x, self_att = self_attn_layer(enriched, enriched, enriched, mask=mask)
    x, _ = apply_gating_layer(x, self.hidden_layer_size, dropout_rate=self.dropout_rate, activation=None)
    x = add_and_norm([x, enriched])

    # Nonlinear processing on outputs
    decoder = gated_residual_network(
        x, self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=True
    )

    # Final skip connection
    decoder, _ = apply_gating_layer(decoder, self.hidden_layer_size, activation=None)
    transformer_layer = add_and_norm([decoder, temporal_feature_layer])

    # Attention components for explainability
    attention_components = {
        # Temporal attention weights
        "decoder_self_attn": self_att,
        # Static variable selection weights
        "static_flags": static_weights[Ellipsis, 0],
        # Variable selection weights of past inputs
        "historical_flags": historical_flags[Ellipsis, 0, :],
        # Variable selection weights of future inputs
        "future_flags": future_flags[Ellipsis, 0, :],
    }

    return transformer_layer, all_inputs, attention_components
Returns graph defining layers of the TFT.
_build_base_graph
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def static_combine_and_mask(embedding): """Applies variable selection network to static inputs. Args: embedding: Transformed static inputs Returns: Tensor output for variable selection network """ # Add temporal features _, num_static, _ = embedding.get_shape().as_list() flatten = tf.keras.layers.Flatten()(embedding) # Nonlinear transformation with gated residual network. mlp_outputs = gated_residual_network( flatten, self.hidden_layer_size, output_size=num_static, dropout_rate=self.dropout_rate, use_time_distributed=False, additional_context=None, ) sparse_weights = tf.keras.layers.Activation("softmax")(mlp_outputs) sparse_weights = K.expand_dims(sparse_weights, axis=-1) trans_emb_list = [] for i in range(num_static): e = gated_residual_network( embedding[:, i : i + 1, :], self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=False, ) trans_emb_list.append(e) transformed_embedding = concat(trans_emb_list, axis=1) combined = tf.keras.layers.Multiply()([sparse_weights, transformed_embedding]) static_vec = K.sum(combined, axis=1) return static_vec, sparse_weights
Applies variable selection network to static inputs. Args: embedding: Transformed static inputs Returns: Tensor output for variable selection network
static_combine_and_mask
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def lstm_combine_and_mask(embedding): """Apply temporal variable selection networks. Args: embedding: Transformed inputs. Returns: Processed tensor outputs. """ # Add temporal features _, time_steps, embedding_dim, num_inputs = embedding.get_shape().as_list() flatten = K.reshape(embedding, [-1, time_steps, embedding_dim * num_inputs]) expanded_static_context = K.expand_dims(static_context_variable_selection, axis=1) # Variable selection weights mlp_outputs, static_gate = gated_residual_network( flatten, self.hidden_layer_size, output_size=num_inputs, dropout_rate=self.dropout_rate, use_time_distributed=True, additional_context=expanded_static_context, return_gate=True, ) sparse_weights = tf.keras.layers.Activation("softmax")(mlp_outputs) sparse_weights = tf.expand_dims(sparse_weights, axis=2) # Non-linear Processing & weight application trans_emb_list = [] for i in range(num_inputs): grn_output = gated_residual_network( embedding[Ellipsis, i], self.hidden_layer_size, dropout_rate=self.dropout_rate, use_time_distributed=True, ) trans_emb_list.append(grn_output) transformed_embedding = stack(trans_emb_list, axis=-1) combined = tf.keras.layers.Multiply()([sparse_weights, transformed_embedding]) temporal_ctx = K.sum(combined, axis=-1) return temporal_ctx, sparse_weights, static_gate
Apply temporal variable selection networks. Args: embedding: Transformed inputs. Returns: Processed tensor outputs.
lstm_combine_and_mask
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def get_lstm(return_state): """Returns LSTM cell initialized with default parameters.""" if self.use_cudnn: lstm = tf.keras.layers.CuDNNLSTM( self.hidden_layer_size, return_sequences=True, return_state=return_state, stateful=False, ) else: lstm = tf.keras.layers.LSTM( self.hidden_layer_size, return_sequences=True, return_state=return_state, stateful=False, # Additional params to ensure LSTM matches CuDNN, See TF 2.0 : # (https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM) activation="tanh", recurrent_activation="sigmoid", recurrent_dropout=0, unroll=False, use_bias=True, ) return lstm
Returns LSTM cell initialized with default parameters.
get_lstm
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
def build_model(self):
    """Builds model and defines training losses.

    Returns:
      Fully defined Keras model.
    """

    with tf.variable_scope(self.name):
        transformer_layer, all_inputs, attention_components = self._build_base_graph()

        outputs = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(self.output_size * len(self.quantiles)))(
            transformer_layer[Ellipsis, self.num_encoder_steps :, :]
        )

        self._attention_components = attention_components

        adam = tf.keras.optimizers.Adam(lr=self.learning_rate, clipnorm=self.max_gradient_norm)

        model = tf.keras.Model(inputs=all_inputs, outputs=outputs)

        print(model.summary())

        valid_quantiles = self.quantiles
        output_size = self.output_size

        class QuantileLossCalculator:
            """Computes the combined quantile loss for prespecified quantiles.

            Attributes:
              quantiles: Quantiles to compute losses
            """

            def __init__(self, quantiles):
                """Initializes computer with quantiles for loss calculations.

                Args:
                  quantiles: Quantiles to use for computations.
                """
                self.quantiles = quantiles

            def quantile_loss(self, a, b):
                """Returns quantile loss for specified quantiles.

                Args:
                  a: Targets
                  b: Predictions
                """
                quantiles_used = set(self.quantiles)

                loss = 0.0
                for i, quantile in enumerate(valid_quantiles):
                    if quantile in quantiles_used:
                        loss += utils.tensorflow_quantile_loss(
                            a[Ellipsis, output_size * i : output_size * (i + 1)],
                            b[Ellipsis, output_size * i : output_size * (i + 1)],
                            quantile,
                        )
                return loss

        quantile_loss = QuantileLossCalculator(valid_quantiles).quantile_loss

        model.compile(loss=quantile_loss, optimizer=adam, sample_weight_mode="temporal")

        self._input_placeholder = all_inputs

    return model
Builds model and defines training losses. Returns: Fully defined Keras model.
build_model
python
microsoft/qlib
examples/benchmarks/TFT/libs/tft_model.py
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
MIT
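The per-quantile terms summed by quantile_loss above come from utils.tensorflow_quantile_loss; the pinball loss it computes can be sketched in plain NumPy (the pinball helper below is an illustrative stand-in, not the library function): under-prediction is weighted by q and over-prediction by 1 - q, so q = 0.9 penalises under-prediction nine times harder.

import numpy as np

def pinball(y, y_hat, q):
    diff = y - y_hat
    return np.mean(np.maximum(q * diff, (q - 1.0) * diff))

y, y_hat = np.array([1.0, 2.0, 3.0]), np.array([1.5, 1.5, 1.5])
print(sum(pinball(y, y_hat, q) for q in (0.1, 0.5, 0.9)))  # combined loss over the TFT's quantiles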