repo_name (string, lengths 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list)
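Each record below pairs one repository with parallel lists: for file i, hexsha[i], file_path[i], code[i], and apis[i] describe the same source file (its commit hash, path, full text, and the library calls that appear in it). The following is a minimal sketch of iterating over such records; the records.jsonl file name and the assumption that each record is stored as one JSON object per line are illustrative, not part of the dump itself.

import json

# Hypothetical input: one JSON object per line with the fields listed above
# (repo_name, hexsha, file_path, code, apis).
with open("records.jsonl") as f:
    records = [json.loads(line) for line in f]

for record in records:
    # hexsha, file_path, code, and apis are parallel lists: position i in
    # each list refers to the same file of the repository.
    for sha, path, source, api_names in zip(
            record["hexsha"], record["file_path"],
            record["code"], record["apis"]):
        print(record["repo_name"], path, sha[:8], f"{len(source)} chars")
        print("  APIs:", ", ".join(api_names))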
dongzhuoyao/embedding-propagation
[ "1f14947bbd8be8a9950e7c4093fbfed0536809b9" ]
[ "src/models/ssl_wrapper.py" ]
[ "\"\"\"\nFew-Shot Parallel: trains a model as a series of tasks computed in parallel on multiple GPUs\n\n\"\"\"\nimport copy\nimport numpy as np\nimport os\nfrom .base_ssl import oracle\nfrom scipy.stats import sem, t\nimport torch\nimport pandas as pd\nimport torch.nn.functional as F\nimport tqdm\nfrom src.tools.meters import BasicMeter\nfrom src.modules.distances import standarized_label_prop, _propagate, prototype_distance\nfrom .base_wrapper import BaseWrapper\nfrom haven import haven_utils as hu\nimport glob\nfrom scipy.stats import sem, t\nimport shutil as sh\nfrom .base_ssl import selection_methods as sm\nfrom .base_ssl import predict_methods as pm\nfrom embedding_propagation import EmbeddingPropagation\n\nclass SSLWrapper(BaseWrapper):\n \"\"\"Trains a model using an episodic scheme on multiple GPUs\"\"\"\n\n def __init__(self, model, n_classes, exp_dict, pretrained_savedir=None, savedir_base=None):\n \"\"\" Constructor\n Args:\n model: architecture to train\n exp_dict: reference to dictionary with the global state of the application\n \"\"\"\n super().__init__()\n self.model = model\n self.exp_dict = exp_dict \n self.ngpu = self.exp_dict[\"ngpu\"]\n self.predict_method = exp_dict['predict_method']\n\n self.model.add_classifier(n_classes, modalities=0)\n self.nclasses = n_classes\n\n best_accuracy = -1 \n self.label = exp_dict['model']['backbone'] + \"_\" + exp_dict['dataset_test'].split('_')[1].replace('-imagenet','')\n print('=============')\n print('dataset:', exp_dict[\"dataset_train\"].split('_')[-1]) \n print('backbone:', exp_dict['model'][\"backbone\"])\n print('n_classes:', exp_dict['n_classes'])\n print('support_size_train:', exp_dict['support_size_train'])\n\n if pretrained_savedir is None:\n # find the best checkpoint\n savedir_base = exp_dict[\"finetuned_weights_root\"]\n if not os.path.exists(savedir_base):\n raise ValueError(\"Please set the variable named \\\n 'finetuned_weights_root' with the path of the folder \\\n with the episodic finetuning experiments\")\n for exp_hash in os.listdir(savedir_base):\n base_path = os.path.join(savedir_base, exp_hash)\n exp_dict_path = os.path.join(base_path, 'exp_dict.json')\n if not os.path.exists(exp_dict_path):\n continue\n loaded_exp_dict = hu.load_json(exp_dict_path)\n pkl_path = os.path.join(base_path, 'score_list_best.pkl')\n\n if exp_dict['support_size_train'] in [2,3,4]:\n support_size_needed = 1\n else:\n support_size_needed = exp_dict['support_size_train']\n\n \"\"\"\n if (loaded_exp_dict[\"model\"][\"name\"] == 'finetuning' and \n loaded_exp_dict[\"dataset_train\"].split('_')[-1] == exp_dict[\"dataset_train\"].split('_')[-1] and \n loaded_exp_dict[\"model\"][\"backbone\"] == exp_dict['model'][\"backbone\"] and\n #loaded_exp_dict['n_classes'] == exp_dict[\"n_classes\"] and, maybe different because cross-domain evaluation.\n loaded_exp_dict['support_size_train'] == support_size_needed,\n loaded_exp_dict[\"embedding_prop\"] == exp_dict[\"embedding_prop\"]):\n \"\"\"\n if True:\n \n model_path = os.path.join(base_path, 'checkpoint_best.pth')\n\n try:\n print(\"Attempting to load \", model_path)\n accuracy = hu.load_pkl(pkl_path)[-1][\"val_accuracy\"]\n state = torch.load(model_path)['model']\n state_keys = list(state.keys())\n for i, key in enumerate(state_keys):\n # module. 
to be compatible to DP pretraining\n # _resnet50->_model, history problem!\n newkey = key\n state[newkey] = state.pop(key)\n if 'classifier' in newkey:\n state.pop(newkey)\n self.model.load_state_dict(state, strict=False)\n if accuracy > best_accuracy:\n best_path = os.path.join(base_path, 'checkpoint_best.pth')\n best_accuracy = accuracy\n except Exception as e:\n print(e)\n \n assert(best_accuracy > 0.1)\n print(\"Finetuning %s with original accuracy : %f\" %(base_path, best_accuracy))\n\n state = torch.load(model_path)['model']\n state_keys = list(state.keys())\n for i, key in enumerate(state_keys):\n # module. to be compatible to DP pretraining\n # _resnet50->_model, history problem!\n newkey = key\n state[newkey] = state.pop(key)\n if 'classifier' in newkey:\n state.pop(newkey)\n self.model.load_state_dict(state, strict=False)\n\n self.best_accuracy = best_accuracy\n self.acc_sum = 0.0\n self.n_count = 0\n self.model.cuda()\n\n def get_embeddings(self, embeddings, support_size, query_size, nclasses):\n b, c = embeddings.size()\n \n if self.exp_dict[\"embedding_prop\"] == True:\n embeddings = EmbeddingPropagation()(embeddings)\n return embeddings.view(b, c)\n\n def get_episode_dict(self, batch):\n nclasses = batch[\"nclasses\"]\n support_size = batch[\"support_size\"]\n query_size = batch[\"query_size\"]\n k = (support_size + query_size)\n c = batch[\"channels\"]\n h = batch[\"height\"]\n w = batch[\"width\"]\n\n tx = batch[\"support_set\"].view(support_size, nclasses, c, h, w).cuda(non_blocking=True)\n vx = batch[\"query_set\"].view(query_size, nclasses, c, h, w).cuda(non_blocking=True)\n ux = batch[\"unlabeled_set\"].view(batch[\"unlabeled_size\"], nclasses, c, h, w).cuda(non_blocking=True)\n x = torch.cat([tx, vx, ux], 0)\n x = x.view(-1, c, h, w).cuda(non_blocking=True)\n\n if self.ngpu > 1:\n features = self.parallel_model(x, is_support=True)\n else:\n features = self.model(x, is_support=True)\n\n embeddings = self.get_embeddings(features, \n support_size, \n query_size+\n batch['unlabeled_size'], \n nclasses) # (b, channels)\n \n uniques = np.unique(batch['targets'])\n labels = torch.zeros(batch['targets'].shape[0])\n for i, u in enumerate(uniques):\n labels[batch['targets']==u] = i\n\n ## perform ssl\n # 1. 
indices\n episode_dict = {}\n ns = support_size*nclasses\n nq = query_size*nclasses\n episode_dict[\"support\"] = {'samples':embeddings[:ns], \n 'labels':labels[:ns]}\n episode_dict[\"query\"] = {'samples':embeddings[ns:ns+nq], \n 'labels':labels[ns:ns+nq]}\n episode_dict[\"unlabeled\"] = {'samples':embeddings[ns+nq:]}\n # batch[\"support_so_far\"] = {'samples':embeddings, \n # 'labels':labels}\n\n \n for k, v in episode_dict.items():\n episode_dict[k]['samples'] = episode_dict[k]['samples'].cpu().numpy()\n if 'labels' in episode_dict[k]:\n episode_dict[k]['labels'] = episode_dict[k]['labels'].cpu().numpy().astype(int)\n return episode_dict\n\n def predict_on_batch(self, episode_dict, support_size_max=None):\n is_inductive = self.exp_dict.get(\"inductive\")#default 0\n\n ind_selected = sm.get_indices(selection_method=\"ssl\",\n episode_dict=episode_dict,\n support_size_max=support_size_max,is_inductive=is_inductive)\n episode_dict = update_episode_dict(ind_selected, episode_dict)\n\n if is_inductive:\n pred_labels = []\n for q_id in range(episode_dict['query']['labels'].shape[0]):\n episode_dict_1query = copy.deepcopy(episode_dict)\n episode_dict_1query['query']['samples'] = episode_dict['query']['samples'][q_id:q_id + 1] # keep dim\n episode_dict_1query['query']['labels']= episode_dict['query']['labels'][q_id:q_id+1]#keep dim\n pred_label_1query = pm.get_predictions(predict_method=self.predict_method,\n episode_dict=episode_dict_1query)\n pred_labels.append(int(pred_label_1query))\n pred_labels = np.asarray(pred_labels)\n return pred_labels\n else:\n pred_labels = pm.get_predictions(predict_method=self.predict_method,\n episode_dict=episode_dict)\n return pred_labels\n\n def val_on_batch(self, batch):\n # if self.exp_dict['ora']\n if self.exp_dict.get(\"pretrained_weights_root\") == 'hdf5':\n episode_dict = self.sampler.sample_episode(int(self.exp_dict['support_size_test']), \n self.exp_dict['query_size_test'], \n self.exp_dict['unlabeled_size_test'], \n apply_ten_flag=self.exp_dict.get(\"apply_ten_flag\"))\n else:\n episode_dict = self.get_episode_dict(batch)\n episode_dict[\"support_so_far\"] = copy.deepcopy(episode_dict[\"support\"])\n episode_dict[\"n_classes\"] = 5\n\n pred_labels = self.predict_on_batch(episode_dict, support_size_max=self.exp_dict['unlabeled_size_test']*self.exp_dict['classes_test'])\n accuracy = oracle.compute_acc(pred_labels=pred_labels, \n true_labels=episode_dict[\"query\"][\"labels\"])\n\n # query_labels = episode_dict[\"query\"][\"labels\"]\n # accuracy = float((pred_labels == query_labels.cuda()).float().mean())\n \n self.acc_sum += accuracy\n self.n_count += 1\n return -1, accuracy\n\n @torch.no_grad()\n def test_on_loader(self, data_loader, max_iter=None):\n \"\"\"Iterate over the validation set\n\n Args:\n data_loader: iterable validation data loader\n max_iter: max number of iterations to perform if the end of the dataset is not reached\n \"\"\"\n self.model.eval()\n\n test_accuracy_meter = BasicMeter.get(\"test_accuracy\").reset()\n test_accuracy = []\n # Iterate through tasks, each iteration loads n tasks, with n = number of GPU\n # dirname = os.path.split(self.exp_dict[\"pretrained_weights_root\"])[-1]\n with tqdm.tqdm(total=len(data_loader)) as pbar:\n for batch_all in data_loader:\n batch = batch_all[0]\n loss, accuracy = self.val_on_batch(batch)\n\n test_accuracy_meter.update(float(accuracy), 1)\n test_accuracy.append(float(accuracy))\n\n string = (\"'%s' - ssl: %.3f\" % \n (self.label, \n # dirname, \n test_accuracy_meter.mean()))\n # 
print(string)\n pbar.update(1)\n pbar.set_description(string)\n \n confidence = 0.95\n n = len(test_accuracy)\n std_err = sem(np.array(test_accuracy))\n h = std_err * t.ppf((1 + confidence) / 2, n - 1)\n return {\"test_loss\": -1, \n \"ssl_accuracy\": test_accuracy_meter.mean(), \n \"ssl_confidence\": h,\n 'finetuned_accuracy': self.best_accuracy}\n\ndef update_episode_dict(ind, episode_dict):\n # 1. update supports so far\n selected_samples = episode_dict[\"unlabeled\"][\"samples\"][ind]\n selected_labels = episode_dict[\"unlabeled\"][\"labels\"][ind]\n \n selected_support_dict = {\"samples\": selected_samples, \"labels\": selected_labels}\n\n for k, v in episode_dict[\"support_so_far\"].items():\n episode_dict[\"support_so_far\"][k] = np.concatenate([v, selected_support_dict[k]], axis=0)\n\n # 2. update unlabeled samples\n n_unlabeled = episode_dict[\"unlabeled\"][\"samples\"].shape[0]\n ind_rest = np.setdiff1d(np.arange(n_unlabeled), ind)\n\n new_unlabeled_dict = {}\n for k, v in episode_dict[\"unlabeled\"].items():\n new_unlabeled_dict[k] = v[ind_rest]\n \n episode_dict[\"unlabeled\"] = new_unlabeled_dict\n\n return episode_dict" ]
[ [ "torch.zeros", "numpy.unique", "torch.cat", "numpy.arange", "numpy.asarray", "torch.load", "numpy.concatenate", "scipy.stats.t.ppf", "torch.no_grad", "numpy.array" ] ]
yavuzdrmzksr/SD-FSIC
[ "29eb91c4d6e5ef91b1a7e4e3c425b0b751ba877b" ]
[ "models/FCModel.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import *\nimport misc.utils as utils\n\nfrom .CaptionModel import CaptionModel\n\nclass LSTMCore(nn.Module):\n def __init__(self, opt):\n super(LSTMCore, self).__init__()\n self.input_encoding_size = opt.input_encoding_size\n self.rnn_size = opt.rnn_size\n self.drop_prob_lm = opt.drop_prob_lm\n \n # Build a LSTM\n self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)\n self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)\n self.dropout = nn.Dropout(self.drop_prob_lm)\n\n def forward(self, xt, state):\n \n all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])\n sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)\n sigmoid_chunk = torch.sigmoid(sigmoid_chunk)\n in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)\n forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)\n out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)\n\n in_transform = torch.max(\\\n all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size),\n all_input_sums.narrow(1, 4 * self.rnn_size, self.rnn_size))\n next_c = forget_gate * state[1][-1] + in_gate * in_transform\n next_h = out_gate * torch.tanh(next_c)\n\n output = self.dropout(next_h)\n state = (next_h.unsqueeze(0), next_c.unsqueeze(0))\n return output, state\n\nclass FCModel(CaptionModel):\n def __init__(self, opt):\n super(FCModel, self).__init__()\n self.vocab_size = opt.vocab_size\n self.input_encoding_size = opt.input_encoding_size\n self.rnn_type = opt.rnn_type\n self.rnn_size = opt.rnn_size\n self.num_layers = opt.num_layers\n self.drop_prob_lm = opt.drop_prob_lm\n self.seq_length = opt.seq_length\n self.fc_feat_size = opt.fc_feat_size\n\n self.ss_prob = 0.0 # Schedule sampling probability\n\n self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)\n self.core = LSTMCore(opt)\n self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)\n self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)\n\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.embed.weight.data.uniform_(-initrange, initrange)\n self.logit.bias.data.fill_(0)\n self.logit.weight.data.uniform_(-initrange, initrange)\n\n def init_hidden(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'lstm':\n return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),\n weight.new_zeros(self.num_layers, bsz, self.rnn_size))\n else:\n return weight.new_zeros(self.num_layers, bsz, self.rnn_size)\n\n def _forward(self, fc_feats, att_feats, seq, att_masks=None):\n batch_size = fc_feats.size(0)\n seq_per_img = seq.shape[0] // batch_size\n state = self.init_hidden(batch_size*seq_per_img)\n outputs = []\n\n if seq_per_img > 1:\n fc_feats = utils.repeat_tensors(seq_per_img, fc_feats)\n\n for i in range(seq.size(1)):\n if i == 0:\n xt = self.img_embed(fc_feats)\n else:\n if self.training and i >= 2 and self.ss_prob > 0.0: # otherwiste no need to sample\n sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1)\n sample_mask = sample_prob < self.ss_prob\n if sample_mask.sum() == 0:\n it = seq[:, i-1].clone()\n else:\n sample_ind = sample_mask.nonzero().view(-1)\n it = seq[:, i-1].data.clone()\n #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)\n #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 
1).view(-1))\n prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)\n it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))\n else:\n it = seq[:, i-1].clone()\n # break if all the sequences end\n if i >= 2 and seq[:, i-1].sum() == 0:\n break\n xt = self.embed(it)\n\n output, state = self.core(xt, state)\n output = F.log_softmax(self.logit(output), dim=1)\n outputs.append(output)\n\n return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()\n\n def get_logprobs_state(self, it, state):\n # 'it' is contains a word index\n xt = self.embed(it)\n\n output, state = self.core(xt, state)\n logprobs = F.log_softmax(self.logit(output), dim=1)\n\n return logprobs, state\n\n def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):\n beam_size = opt.get('beam_size', 10)\n batch_size = fc_feats.size(0)\n\n assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'\n seq = torch.LongTensor(self.seq_length, batch_size).zero_()\n seqLogprobs = torch.FloatTensor(self.seq_length, batch_size, self.vocab_size + 1)\n # lets process every image independently for now, for simplicity\n\n self.done_beams = [[] for _ in range(batch_size)]\n for k in range(batch_size):\n state = self.init_hidden(beam_size)\n for t in range(2):\n if t == 0:\n xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)\n elif t == 1: # input <bos>\n it = fc_feats.data.new(beam_size).long().zero_()\n xt = self.embed(it)\n\n output, state = self.core(xt, state)\n logprobs = F.log_softmax(self.logit(output), dim=1)\n\n self.done_beams[k] = self.beam_search(state, logprobs, opt=opt)\n seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score\n seqLogprobs[:, k] = self.done_beams[k][0]['logps']\n # return the samples and their log likelihoods\n return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)\n\n def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):\n sample_method = opt.get('sample_method', 'greedy')\n beam_size = opt.get('beam_size', 1)\n temperature = opt.get('temperature', 1.0)\n if beam_size > 1 and sample_method in ['greedy', 'beam_search']:\n return self._sample_beam(fc_feats, att_feats, opt)\n\n batch_size = fc_feats.size(0)\n state = self.init_hidden(batch_size)\n seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long)\n seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length, self.vocab_size + 1)\n for t in range(self.seq_length + 2):\n if t == 0:\n xt = self.img_embed(fc_feats)\n else:\n if t == 1: # input <bos>\n it = fc_feats.data.new(batch_size).long().zero_()\n xt = self.embed(it)\n\n output, state = self.core(xt, state)\n logprobs = F.log_softmax(self.logit(output), dim=1)\n\n # sample the next_word\n if t == self.seq_length + 1: # skip if we achieve maximum length\n break\n if sample_method == 'greedy':\n sampleLogprobs, it = torch.max(logprobs.data, 1)\n it = it.view(-1).long()\n else:\n if temperature == 1.0:\n prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)\n else:\n # scale logprobs by temperature\n prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()\n it = torch.multinomial(prob_prev, 1).cuda()\n sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions\n it = it.view(-1).long() # and flatten indices for downstream processing\n\n if t >= 1:\n # stop 
when all finished\n if t == 1:\n unfinished = it > 0\n else:\n unfinished = unfinished * (it > 0)\n it = it * unfinished.type_as(it)\n seq[:,t-1] = it #seq[t] the input of t+2 time step\n seqLogprobs[:,t-1] = sampleLogprobs.view(-1)\n if unfinished.sum() == 0:\n break\n\n return seq, seqLogprobs\n" ]
[ [ "torch.div", "torch.nn.Dropout", "torch.sigmoid", "torch.LongTensor", "torch.max", "torch.nn.Embedding", "torch.multinomial", "torch.tanh", "torch.nn.Linear", "torch.exp", "torch.FloatTensor" ] ]
rhgao/ObjectFolder
[ "91e56bcbf3dcea9b5604f53055133ddbb00fcfe2" ]
[ "AudioNet_model.py" ]
[ "import torch\ntorch.autograd.set_detect_anomaly(True)\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom AudioNet_utils import *\n\nclass DenseLayer(nn.Linear):\n def __init__(self, in_dim: int, out_dim: int, activation: str = 'relu', *args, **kwargs) -> None:\n self.activation = activation\n super().__init__(in_dim, out_dim, *args, **kwargs)\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight, gain=torch.nn.init.calculate_gain(self.activation))\n if self.bias is not None:\n torch.nn.init.zeros_(self.bias)\n\nclass Embedder:\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n self.create_embedding_fn()\n\n def create_embedding_fn(self):\n embed_fns = []\n d = self.kwargs['input_dims']\n out_dim = 0\n if self.kwargs['include_input']:\n embed_fns.append(lambda x: x)\n out_dim += d\n\n max_freq = self.kwargs['max_freq_log2']\n N_freqs = self.kwargs['num_freqs']\n\n if self.kwargs['log_sampling']:\n freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs)\n else:\n freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs)\n\n for freq in freq_bands:\n for p_fn in self.kwargs['periodic_fns']:\n embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))\n out_dim += d\n\n self.embed_fns = embed_fns\n self.out_dim = out_dim\n\n def embed(self, inputs):\n return torch.cat([fn(inputs) for fn in self.embed_fns], -1)\n\ndef get_embedder(multires, i=0):\n if i == -1:\n #5 is for x, y, z, f, t\n return nn.Identity(), 5\n\n embed_kwargs = {\n 'include_input': True,\n 'input_dims': 3,\n 'max_freq_log2': multires-1,\n 'num_freqs': multires,\n 'log_sampling': True,\n 'periodic_fns': [torch.sin, torch.cos],\n }\n\n embedder_obj = Embedder(**embed_kwargs)\n embed = lambda x, eo=embedder_obj: eo.embed(x)\n return embed, embedder_obj.out_dim\n\nclass AudioNeRF(nn.Module):\n def __init__(self, D=8, input_ch=5, output_ch=2):\n super(AudioNeRF, self).__init__()\n self.model_x = NeRF(D = D, input_ch = input_ch, output_ch = output_ch)\n self.model_y = NeRF(D = D, input_ch = input_ch, output_ch = output_ch)\n self.model_z = NeRF(D = D, input_ch = input_ch, output_ch = output_ch)\n\n def forward(self, embedded_x, embedded_y, embedded_z):\n results_x = self.model_x(embedded_x)\n results_y = self.model_y(embedded_y)\n results_z = self.model_z(embedded_z)\n return results_x, results_y, results_z\n\n\nclass NeRF(nn.Module):\n def __init__(self, D=8, W=256, input_ch=5, input_ch_views=0, output_ch=2, skips=[4], use_viewdirs=False):\n \"\"\"\n \"\"\"\n super(NeRF, self).__init__()\n self.D = D\n self.W = W\n self.input_ch = input_ch\n self.input_ch_views = input_ch_views\n self.skips = skips\n self.use_viewdirs = use_viewdirs\n\n self.pts_linears = nn.ModuleList(\n [DenseLayer(input_ch, W, activation='relu')] + [DenseLayer(W, W, activation='relu') if i not in self.skips else DenseLayer(W + input_ch, W, activation='relu') for i in range(D-1)])\n\n self.views_linears = nn.ModuleList([DenseLayer(input_ch_views + W, W//2, activation='relu')])\n\n if use_viewdirs:\n self.feature_linear = DenseLayer(W, W, activation='sigmoid')\n #self.alpha_linear = DenseLayer(W, 1, activation='linear')\n self.rgb_linear = DenseLayer(W//2, output_ch, activation='sigmoid')\n else:\n self.output_linear = DenseLayer(W, output_ch, activation='sigmoid')\n\n\n def forward(self, x):\n input_pts, input_views = torch.split(x, [self.input_ch, self.input_ch_views], dim=-1)\n h = input_pts\n for i, l in enumerate(self.pts_linears):\n h = self.pts_linears[i](h)\n h = 
F.relu(h)\n if i in self.skips:\n h = torch.cat([input_pts, h], -1)\n\n if self.use_viewdirs:\n feature = self.feature_linear(h)\n h = torch.cat([feature, input_views], -1)\n\n for i, l in enumerate(self.views_linears):\n h = self.views_linears[i](h)\n h = F.relu(h)\n\n outputs = self.rgb_linear(h)\n else:\n outputs = self.output_linear(h)\n\n return outputs\n" ]
[ [ "torch.nn.init.calculate_gain", "torch.linspace", "torch.autograd.set_detect_anomaly", "torch.cat", "torch.split", "torch.nn.Identity", "torch.nn.functional.relu", "torch.nn.init.zeros_" ] ]
TheYargonaut/lucre
[ "1abd472993df01b443ab4811379dfe52e18cf790" ]
[ "Main.py" ]
[ "from ChartWidget import ChartWidget\nfrom EditGroupWindow import editGroupCb\nfrom Format import FormatMan\nfrom Group import GroupMan\nfrom GroupControlWidget import GroupList\nfrom ImportLedgerWindow import importLedgerCb\nfrom Ledger import Ledger\nfrom PlotSettings import PlotSettings\nfrom PlotSettingsWidget import PlotSettingsWidget\nfrom Scrollable import Scrollable\nfrom ViewLedgerWindow import viewLedgerCb\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom tkinter import ttk\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport tkinter as tk\n\npd.plotting.register_matplotlib_converters()\n\nclass MainWindow( tk.Tk ):\n def __init__( self ):\n tk.Tk.__init__( self )\n\n self.plotSettings = PlotSettings()\n\n self.format = FormatMan()\n self.group = GroupMan()\n self.ledger = Ledger()\n self.loadData()\n\n self.build()\n\n def loadData( self ):\n self.format.load()\n self.group.load()\n self.ledger.load()\n\n def redraw( self, *args ):\n if self.chartWidget:\n self.chartWidget.draw()\n \n def editGroup( self, idx ):\n editGroupCb( self, self.group.groups[ idx ], self.ledger, 25 )()\n \n def viewLedger( self ):\n viewLedgerCb( self, self.group, self.ledger )()\n \n def activateGroup( self, label, state ):\n self.group.setActive( label, state )\n\n def build( self ):\n self.grid_rowconfigure( 0, weight=1 )\n self.grid_columnconfigure( 0, weight=1 )\n\n self.chartWidget = ChartWidget( self, self.group, self.ledger, self.plotSettings )\n self.chartWidget.grid( row=0, column=0, sticky=tk.NSEW )\n\n controlFrame = ttk.Frame( self )\n controlFrame.grid( row=0, column=1, sticky=tk.NSEW )\n\n controlFrame.grid_columnconfigure( 0, weight=1 )\n controlFrame.grid_rowconfigure( 2, weight=1 )\n\n importLedgerButton = ttk.Button(\n controlFrame, text=\"Import Ledger\",\n command=importLedgerCb( self, self.ledger, self.format, 25 )\n )\n importLedgerButton.grid( row=0, column=0, sticky=tk.NSEW )\n viewLedgerButton = ttk.Button( controlFrame, text=\"Browse Ledger\", command=self.viewLedger )\n viewLedgerButton.grid( row=1, column=0, sticky=tk.NSEW )\n\n groupScroll = Scrollable( controlFrame, vertical=True )\n groupScroll.grid( row=2, column=0, sticky=tk.NSEW )\n groupList = GroupList(\n groupScroll,\n self.group.groups,\n \"New Group\",\n self.group.create,\n self.activateGroup,\n self.editGroup\n )\n groupList.pack()\n\n self.plotSettingsWidget = PlotSettingsWidget( controlFrame, self.plotSettings )\n self.plotSettingsWidget.grid( row=3, column=0, sticky=tk.NSEW )\n\n# make the window\ntop = MainWindow()\ntop.title( \"lucre\" )\ntop.tk.call( 'wm', 'iconphoto', top._w, tk.PhotoImage( file='logo.png' ) )\ntop.mainloop()\n\n# save current settings\nuserDataPath = os.path.join( '.', 'userdata' )\nif not os.path.exists( userDataPath ):\n os.mkdir( userDataPath )\ntop.format.save()\ntop.ledger.save()\ntop.group.save()" ]
[ [ "pandas.plotting.register_matplotlib_converters" ] ]
ChristopherDaigle/daigle_dist
[ "e716bdd8ce357461160b7a12510525738f914cdf" ]
[ "daigle_dist/Gaussiandistribution.py" ]
[ "import math\nimport matplotlib.pyplot as plt\nfrom .Generaldistribution import Distribution\n\nclass Gaussian(Distribution):\n\t\"\"\" Gaussian distribution class for calculating and \n\tvisualizing a Gaussian distribution.\n\t\n\tAttributes:\n\t\tmean (float) representing the mean value of the distribution\n\t\tstdev (float) representing the standard deviation of the distribution\n\t\tdata_list (list of floats) a list of floats extracted from the data file\n\t\t\t\n\t\"\"\"\n\tdef __init__(self, mu=0, sigma=1):\n\t\t\n\t\tDistribution.__init__(self, mu, sigma)\n\t\n\t\t\n\t\n\tdef calculate_mean(self):\n\t\n\t\t\"\"\"Function to calculate the mean of the data set.\n\t\t\n\t\tArgs: \n\t\t\tNone\n\t\t\n\t\tReturns: \n\t\t\tfloat: mean of the data set\n\t\n\t\t\"\"\"\n\t\t\t\t\t\n\t\tavg = 1.0 * sum(self.data) / len(self.data)\n\t\t\n\t\tself.mean = avg\n\t\t\n\t\treturn self.mean\n\n\n\n\tdef calculate_stdev(self, sample=True):\n\n\t\t\"\"\"Function to calculate the standard deviation of the data set.\n\t\t\n\t\tArgs: \n\t\t\tsample (bool): whether the data represents a sample or population\n\t\t\n\t\tReturns: \n\t\t\tfloat: standard deviation of the data set\n\t\n\t\t\"\"\"\n\n\t\tif sample:\n\t\t\tn = len(self.data) - 1\n\t\telse:\n\t\t\tn = len(self.data)\n\t\n\t\tmean = self.calculate_mean()\n\t\n\t\tsigma = 0\n\t\n\t\tfor d in self.data:\n\t\t\tsigma += (d - mean) ** 2\n\t\t\n\t\tsigma = math.sqrt(sigma / n)\n\t\n\t\tself.stdev = sigma\n\t\t\n\t\treturn self.stdev\n\t\t\n\t\t\n\t\t\n\tdef plot_histogram(self):\n\t\t\"\"\"Function to output a histogram of the instance variable data using \n\t\tmatplotlib pyplot library.\n\t\t\n\t\tArgs:\n\t\t\tNone\n\t\t\t\n\t\tReturns:\n\t\t\tNone\n\t\t\"\"\"\n\t\tplt.hist(self.data)\n\t\tplt.title('Histogram of Data')\n\t\tplt.xlabel('data')\n\t\tplt.ylabel('count')\n\t\t\n\t\t\n\t\t\n\tdef pdf(self, x):\n\t\t\"\"\"Probability density function calculator for the gaussian distribution.\n\t\t\n\t\tArgs:\n\t\t\tx (float): point for calculating the probability density function\n\t\t\t\n\t\t\n\t\tReturns:\n\t\t\tfloat: probability density function output\n\t\t\"\"\"\n\t\t\n\t\treturn (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)\n\t\t\n\n\tdef plot_histogram_pdf(self, n_spaces = 50):\n\n\t\t\"\"\"Function to plot the normalized histogram of the data and a plot of the \n\t\tprobability density function along the same range\n\t\t\n\t\tArgs:\n\t\t\tn_spaces (int): number of data points \n\t\t\n\t\tReturns:\n\t\t\tlist: x values for the pdf plot\n\t\t\tlist: y values for the pdf plot\n\t\t\t\n\t\t\"\"\"\n\t\t\n\t\tmu = self.mean\n\t\tsigma = self.stdev\n\n\t\tmin_range = min(self.data)\n\t\tmax_range = max(self.data)\n\t\t\n\t\t # calculates the interval between x values\n\t\tinterval = 1.0 * (max_range - min_range) / n_spaces\n\n\t\tx = []\n\t\ty = []\n\t\t\n\t\t# calculate the x values to visualize\n\t\tfor i in range(n_spaces):\n\t\t\ttmp = min_range + interval*i\n\t\t\tx.append(tmp)\n\t\t\ty.append(self.pdf(tmp))\n\n\t\t# make the plots\n\t\tfig, axes = plt.subplots(2,sharex=True)\n\t\tfig.subplots_adjust(hspace=.5)\n\t\taxes[0].hist(self.data, density=True)\n\t\taxes[0].set_title('Normed Histogram of Data')\n\t\taxes[0].set_ylabel('Density')\n\n\t\taxes[1].plot(x, y)\n\t\taxes[1].set_title('Normal Distribution for \\n Sample Mean and Sample Standard Deviation')\n\t\taxes[0].set_ylabel('Density')\n\t\tplt.show()\n\n\t\treturn x, y\n\t\t\n\tdef __add__(self, other):\n\t\t\n\t\t\"\"\"Function to add 
together two Gaussian distributions\n\t\t\n\t\tArgs:\n\t\t\tother (Gaussian): Gaussian instance\n\t\t\t\n\t\tReturns:\n\t\t\tGaussian: Gaussian distribution\n\t\t\t\n\t\t\"\"\"\n\t\t\n\t\tresult = Gaussian()\n\t\tresult.mean = self.mean + other.mean\n\t\tresult.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)\n\t\t\n\t\treturn result\n\t\t\n\t\t\n\tdef __repr__(self):\n\t\n\t\t\"\"\"Function to output the characteristics of the Gaussian instance\n\t\t\n\t\tArgs:\n\t\t\tNone\n\t\t\n\t\tReturns:\n\t\t\tstring: characteristics of the Gaussian\n\t\t\n\t\t\"\"\"\n\t\t\n\t\treturn f\"mean {self.mean}, standard deviation {self.stdev}\"" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
xigang/ml
[ "1d8891b5bc66af8506694ba2d618624ae37f59a6" ]
[ "tensorflow/linear_model.py" ]
[ "import tensorflow as tf\n\n# linear_model: y = W*x + b\nW = tf.Variable([.1], dtype=tf.float32)\nb = tf.Variable([-.1], dtype=tf.float32)\n\nx = tf.placeholder(tf.float32, name='x')\ny = tf.placeholder(tf.float32, name='y')\n\n# create linear model\nlinear_model = W * x + b\n\n# create loss model\nwith tf.name_scope(\"loss-model\"):\n loss = tf.reduce_sum(tf.square(linear_model -y))\n #Add scalar to the output of the loss model to observe the convergence curve of loss\n tf.summary.scalar(\"loss\", loss)\n\n# create a optimizer use Gradient Descent algorithm.\noptimizer = tf.train.GradientDescentOptimizer(0.001)\ntrain = optimizer.minimize(loss)\n\n# create session use compute\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# merges all sunmaries collected in the default graph\nmerged = tf.summary.merge_all()\n\n#All data generated by the model run is saved to the /tmp/tensorflow folder for use by TensorBoard\nwriter = tf.summary.FileWriter('/tmp/tensorflow', sess.graph)\n\n# train dataset\nx_train = [1, 2, 3, 6, 8]\ny_train = [4.8, 8.5, 10.4, 21.0, 25.3]\n\n# Training 10,000 times\nfor i in range(10000):\n # Pass in merge during training\n summary, _ = sess.run([merged, train], {x: x_train, y: y_train})\n # collected output train data\n writer.add_summary(summary, i)\n\ncurrent_W, current_b, current_loss = sess.run([W, b, loss], {x: x_train, y: y_train})\n\n# Print the results after training\nprint(\"After train W: %s b: %s, loss: %s\" % (current_W, current_b, current_loss))\n" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.Variable", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.merge_all", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "tensorflow.summary.scalar" ] ]
iag0g0mes/t2_fis_driving_style
[ "7f62ac3e67e65e7bd1273a2f845eb05820e95b70" ]
[ "analysis/analysis_results.py" ]
[ "import numpy as np\nfrom typing import Any, Dict, List, Tuple, NoReturn\n\n\nimport argparse\nimport os\n\nimport pandas as pd\nimport json\n\nfrom sklearn.metrics import silhouette_score \nfrom sklearn.metrics import calinski_harabasz_score \nfrom sklearn.metrics import davies_bouldin_score \n\ndef parse_arguments() -> Any:\n\t\"\"\"Parse command line arguments.\"\"\"\n\n\tparser = argparse.ArgumentParser()\n\t\n\tparser.add_argument(\n\t\t\"--data_dir\",\n\t\tdefault=\"\",\n\t\ttype=str,\n\t\thelp=\"Directory where the results of each algorithm (csv files) are saved\",\n\t)\t\n\n\tparser.add_argument(\n\t\t\"--result_dir\",\n\t\tdefault=\"\",\n\t\ttype=str,\n\t\thelp=\"Directory where the result of the analysis (csv files) should be saved\",\n\t)\n\n\treturn parser.parse_args()\n\ndef discriptive_analysis(X:np.ndarray, Y:np.ndarray) -> Dict:\n\n\t\n\t'''\n\t\tcompute descriptive statistics metrics of each cluster\n\n\t\t- metrics:\n\t\t\t- central tendency : mean\n\t\t\t- dispersion : std\n\t\t\t- bounds : min, max\n\t\t\t- quantile : 0.25, 0.5, 0.75\n\t\t- params:\n\t\t\t- X : np.ndarray : [mean_velocity, mean_acceleration,\n\t\t\t\t\t\t\t\tmean_deceleratuin, std_lateral_jerk] (m, 4)\n\t\t\t- Y : np.ndarray : [driving_style] (m)\n\t\t- return:\n\t\t\t result : dict : metrics\n\t'''\n\n\tresult = {}\n\n\tfor c in range(0,3):\n\t\tindexes = np.where(Y==c)[0]\n\n\t\t_mean = 0.0 if len(indexes) == 0 else np.mean(X[indexes], axis=0).tolist()\n\t\t_std = 0.0 if len(indexes) == 0 else np.std(X[indexes], axis=0).tolist()\n\t\t_min = 0.0 if len(indexes) == 0 else np.min(X[indexes], axis=0).tolist()\n\t\t_max = 0.0 if len(indexes) == 0 else np.max(X[indexes], axis=0).tolist()\n\t\t_q25 = 0.0 if len(indexes) == 0 else np.quantile(X[indexes], 0.25, axis=0).tolist()\n\t\t_q50 = 0.0 if len(indexes) == 0 else np.quantile(X[indexes], 0.50, axis=0).tolist()\n\t\t_q75 = 0.0 if len(indexes) == 0 else np.quantile(X[indexes], 0.75, axis=0).tolist()\n\n\t\tresult[f'{c}'] = {}\n\t\tresult[f'{c}']['mean'] = _mean \n\t\tresult[f'{c}']['std'] = _std \n\t\tresult[f'{c}']['min'] = _min \n\t\tresult[f'{c}']['max'] = _max \n\t\tresult[f'{c}']['q25'] = _q25 \n\t\tresult[f'{c}']['q50'] = _q50 \n\t\tresult[f'{c}']['q75'] = _q75 \n\n\t\t#print (result)\n\n\treturn result\n\ndef clustering_analysis(X:np.ndarray, Y:np.ndarray) -> Dict:\n\n\t'''\n\t\tcompute clustering analysis evaluation metrics\n\n\t\t- metrics:\n\t\t\t- Silhouette Coefficient\n\t\t\t- Calinski Harabasz Score\n\t\t\t- Davis-Bouldin Index\n\t\t- params:\n\t\t\t- X : np.ndarray : [mean_velocity, mean_acceleration,\n\t\t\t\t\t\t\t\tmean_deceleratuin, std_lateral_jerk] (m, 4)\n\t\t\t- Y : np.ndarray : [driving_style] (m)\n\t\t- return:\n\t\t\t result : dict : metric\n\t'''\n\n\tresult = {}\n\tresult['silhouette'] = 0\n\tresult['calinski'] = 0\n\tresult['davis'] = 0\n\t\n\tif len(np.unique(Y)) > 1:\n\t\tresult['silhouette'] = silhouette_score(X=X, labels=Y)\n\t\tresult['calinski'] = calinski_harabasz_score(X=X, labels=Y)\n\t\tresult['davis'] = davies_bouldin_score(X=X, labels=Y)\n\n\treturn result\n\nif __name__== '__main__':\n\n\t'''\n\t\t- compute evaluation metrics for each algorithm\n\t\t- based on clustering analysis and descriptive statistics\n\n\t\t- metrics:\n\n\t\t\t- Silhouette Coefficient\n\t\t\t\t+ https://en.wikipedia.org/wiki/Silhouette_(clustering)\n\t\t\t\t+ https://www.sciencedirect.com/science/article/pii/0377042787901257\n\t\t\t\t+ 
https://scikit-learn.org/stable/modules/generated/sklearn.metrics\n\t\t\t\t\t.silhouette_score.html#sklearn-metrics-silhouette-score\n\t\t\t- Calinski Harabasz Score\n\t\t\t\t+ https://medium.com/@haataa/how-to-measure-clustering-performances\n\t\t\t\t\t-when-there-are-no-ground-truth-db027e9a871c\n\t\t\t\t+ https://www.tandfonline.com/doi/abs/10.1080/03610927408827101\n\t\t\t\t+ https://scikit-learn.org/stable/modules/generated/sklearn.metrics\n\t\t\t\t\t.calinski_harabasz_score.html#sklearn.metrics.calinski_harabasz_score\n\t\t\t- Davis-Bouldin Index\n\t\t\t\t+ https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index\n\t\t\t\t+ https://ieeexplore.ieee.org/document/4766909\n\t\t\t\t+ https://scikit-learn.org/stable/modules/generated/sklearn.metrics\n\t\t\t\t\t.davies_bouldin_score.html#sklearn.metrics.davies_bouldin_score\n\t\t\t- Mean, STD, min, max, Q-25, Q-50, Q-75\n\n\t'''\n\n\t\n\targs = parse_arguments()\n\n\tassert os.path.exists(args.data_dir),\\\n\t\tf'[Clustering Analysis][main][ERROR] data_dir not found!({args.data_dir})'\n\n\n\tresult_files = os.listdir(args.data_dir)\n\t\n\tassert len(result_files) > 0,\\\n\t\tf'[Clustering Analysis][main][ERROR] no file found in data_dir directory!({args.data_dir})'\n\n\n\tfor result_file in result_files:\n\t\t#results_fuzzy_cm_train_2s_ekf\n\t\tif not result_file.endswith(f'csv'):\n\t\t\tcontinue\n\n\t\tprint (f'[Clustering Analysis][main] \\033[92mprocessing file\\033[0m: {result_file}')\n\n\t\t_split = result_file.split('_') \n\t\tmode = _split[-3]\n\t\tobs_len = int(_split[-2][0])\n\t\tfilter_name = _split[-1].split('.')[0]\n\n\t\tprint (f'[Clustering Analysis][main] loading data...')\n\n\t\t# [mean_v, mean_acc, mean_deac, std_jy, y]\n\t\tdata = pd.read_csv(os.path.join(args.data_dir, result_file))\n\n\t\tX = data[['mean_velocity', 'mean_acceleration', 'mean_deceleration', 'std_lateral_jerk']].values\n\t\tY = np.asarray(data[['driving_style']].values, dtype=int)\n\t\tY = np.squeeze(Y)\n\n\t\n\t\tprint (f'[Clustering Analysis][main] computing metrics...')\n\t\tc_a_result = clustering_analysis(X=X, Y=Y)\n\t\tstats_result = discriptive_analysis(X=X, Y=Y)\n\t\t\n\n\t\t\n\t\tresult = dict(c_a_result)\n\t\tresult.update(stats_result)\n\n\t\tresult_file_name = f'analysis_{mode}_{obs_len}s_{filter_name}.json'\n\t\tprint (f'[Clustering Analysis][main] saving results ({result_file_name})')\n\t\twith open(os.path.join(args.result_dir, result_file_name), 'w') as f:\n\t\t\tjson.dump(result, f)\n\t\t\n\t" ]
[ [ "sklearn.metrics.silhouette_score", "numpy.asarray", "numpy.unique", "numpy.squeeze", "numpy.min", "numpy.quantile", "numpy.max", "numpy.std", "sklearn.metrics.davies_bouldin_score", "numpy.mean", "numpy.where", "sklearn.metrics.calinski_harabasz_score" ] ]
e5120/EDAs
[ "acf86fa35182b8fe0cd913d6fb46280b2f9e6e46" ]
[ "eda/optimizer/replacement/restricted_tournament.py" ]
[ "import numpy as np\n\nfrom eda.optimizer.replacement import ReplacementBase\n\n\nclass RestrictedTournament(ReplacementBase):\n \"\"\"\n A class of restricted tournament replacement (RTR).\n \"\"\"\n def __init__(self, dim, replace_rate=0.5, window_size=2, fix_size=True):\n super(RestrictedTournament, self).__init__(replace_rate, fix_size=fix_size)\n \"\"\"\n Parameters\n ----------\n dim : int\n The dimension of the problem.\n window_size : int, default 2\n A user parameter which determines trade-off between the goodness and the diversity in the population.\n \"\"\"\n assert 0 < dim\n ws = int(dim / 20)\n self.window_size = ws if ws > 1 and window_size > ws else window_size\n\n def apply(self, parent, p_evals, candidate, c_evals):\n p_lam = parent.shape[0]\n c_lam = candidate.shape[0]\n replaced_lam = int(p_lam * self.replace_rate)\n assert replaced_lam == c_lam, \\\n \"The number of individuals for the replacement({}) must match the population size of candidate({})\".format(replaced_lam, c_lam)\n sampled_idx = np.random.randint(0, p_lam, (self.window_size, c_lam))\n # In the case of (population_size, dim)\n if len(parent.shape) == 2:\n distances = np.sum(parent[sampled_idx] != candidate, axis=2)\n # In the case of (population_size, dim, one_hot)\n elif len(parent.shape) == 3:\n distances = np.sum(np.argmax(parent[sampled_idx], axis=3) != np.argmax(candidate, axis=2), axis=2)\n else:\n print(\"error. shape of ndarray is wrong\")\n exit()\n target_idx = sampled_idx[np.argmin(distances, axis=0), np.arange(c_lam)]\n for c_idx, p_idx in enumerate(target_idx):\n if p_evals[p_idx] > c_evals[c_idx]:\n p_evals[p_idx] = c_evals[c_idx]\n parent[p_idx] = candidate[c_idx]\n return parent, p_evals\n\n def __str__(self):\n sup_str = \" \" + super(RestrictedTournament, self).__str__().replace(\"\\n\", \"\\n \")\n return 'Restricted Tournament Replacement(\\n' \\\n '{}\\n' \\\n ' window size: {}' \\\n '\\n)'.format(sup_str, self.window_size)\n" ]
[ [ "numpy.arange", "numpy.argmax", "numpy.argmin", "numpy.sum", "numpy.random.randint" ] ]
qwe79137/JumpStarter
[ "e59ee341f31d7cc9fde05b6f395d29d4d63130e4" ]
[ "detector/cs_anomaly_detector.py" ]
[ "import numpy as np\nimport os\nfrom algorithm.cluster import cluster\nfrom algorithm.cvxpy import reconstruct\nfrom algorithm.sampling import localized_sample\nfrom cvxpy.error import SolverError\nfrom multiprocessing import Process, Event, Queue\nfrom threading import Thread\n\nmax_seed = 10 ** 9 + 7\n\n\nclass CycleFeatureProcess(Process):\n \"\"\"\n 计算单个周期内特征的线程\n \"\"\"\n\n def __init__(\n self,\n task_queue: Queue,\n result_queue: Queue,\n cluster_threshold: float\n ):\n \"\"\"\n :param task_queue: 作业队列\n :param result_queue: 结果队列\n :param cluster_threshold: 聚类阈值\n \"\"\"\n super().__init__()\n self.task_queue = task_queue\n self.result_queue = result_queue\n self.cluster_threshold = cluster_threshold\n\n def run(self):\n print('CycleFeatureProcess-%d: start' % os.getpid())\n while not self.task_queue.empty():\n group_index, cycle_data = self.task_queue.get()\n self.result_queue.put(\n (group_index, cluster(cycle_data, self.cluster_threshold))\n )\n print(\n 'CycleFeatureProcess-%d: finish task-%d' %\n (os.getpid(), group_index)\n )\n print('CycleFeatureProcess-%d: exit' % os.getpid())\n\n\nclass WindowReconstructProcess(Process):\n \"\"\"\n 窗口重建工作进程\n \"\"\"\n\n def __init__(\n self,\n data: np.array,\n task_queue: Queue,\n result_queue: Queue,\n cycle: int,\n latest_windows: int,\n sample_score_method,\n sample_rate: float,\n scale: float,\n rho: float,\n sigma: float,\n random_state: int,\n without_localize_sampling: bool,\n retry_limit: int,\n task_return_event: Event()\n ):\n \"\"\"\n :param data: 原始数据的拷贝\n :param task_queue: 作业队列\n :param result_queue: 结果队列\n :param cycle: 周期\n :param latest_windows: 计算采样价值指标时参考的最近历史周期数\n :param sample_score_method: 计算采样价值指标方法\n :param sample_rate: 采样率\n :param scale: 采样参数: 等距采样点扩充倍数\n :param rho: 采样参数: 中心采样概率\n :param sigma: 采样参数: 采样集中程度\n :param random_state: 随机数种子\n :param without_localize_sampling: 是否不按局部化采样算法进行采样\n :param retry_limit: 每个窗口重试的上限\n :param task_return_event: 当一个作业被完成时触发的事件, 通知主进程收集\n \"\"\"\n super().__init__()\n self.data = data\n self.task_queue = task_queue\n self.result_queue = result_queue\n self.cycle = cycle\n self.latest_windows = latest_windows\n self.sample_score_method = sample_score_method\n self.sample_rate = sample_rate\n self.without_localize_sampling = without_localize_sampling\n self.scale = scale\n self.rho = rho\n self.sigma = sigma\n self.random_state = random_state\n self.retry_limit = retry_limit\n self.task_return_event = task_return_event\n\n def run(self):\n from time import time\n if self.random_state:\n np.random.seed(self.random_state)\n tot = time()\n data_process = 0\n wait_syn = 0\n rec = 0\n sample_scoring = 0\n print('WindowReconstructProcess-%d: start' % os.getpid())\n while not self.task_queue.empty():\n t = time()\n wb, we, group = self.task_queue.get()\n wait_syn += time() - t\n t = time()\n hb = max(0, wb - self.latest_windows)\n latest = self.data[hb:wb]\n window_data = self.data[wb:we]\n data_process += time() - t\n t = time()\n sample_score = self.sample_score_method(window_data, latest)\n sample_scoring += time() - t\n t = time()\n rec_window, retries = \\\n self.window_sample_reconstruct(\n data=window_data,\n groups=group,\n score=sample_score,\n random_state=self.random_state * wb * we % max_seed\n )\n rec += time() - t\n t = time()\n self.result_queue.put((wb, we, rec_window, retries, sample_score))\n self.task_return_event.set()\n wait_syn += time() - t\n print(\n 'WindowReconstructProcess-%d: window[%d, %d) done' %\n (os.getpid(), wb, we)\n )\n tot = time() - tot\n 
print('WindowReconstructProcess-%d: exit' % os.getpid())\n print(\n 'tot: %f\\ndata_process: %f\\nwait_syn: %f\\nrec: %f\\n'\n 'sample_scoring:%f\\n'\n % (tot, data_process, wait_syn, rec, sample_scoring)\n )\n\n def sample(self, x: np.array, m: int, score: np.array, random_state: int):\n \"\"\"\n 取得采样的数据\n :param x: kpi等距时间序列, shape=(n,d), n是行数, d是维度\n :param m: 采样个数\n :param score: 采样点置信度\n :param random_state: 采样随机种子\n :return: 采样序列数组X, X[i][0]是0~n-1的实数, 表示该采样点的时间点,\n X[i][1] 是shape=(k,)的数组, 表示该时间点各个维度kpi数据 0<i<m\n 已经按X[i][0]升序排序\n \"\"\"\n n, d = x.shape\n data_mat = np.mat(x)\n sample_matrix, timestamp = localized_sample(\n x=data_mat, m=m,\n score=score,\n scale=self.scale, rho=self.rho, sigma=self.sigma,\n random_state=random_state\n )\n # 采样中心对应的位置\n s = np.array(sample_matrix * data_mat)\n res = []\n for i in range(m):\n res.append((timestamp[i], s[i]))\n res.sort(key=lambda each: each[0])\n res = np.array(res)\n timestamp = np.array(res[:, 0]).astype(int)\n values = np.zeros((m, d))\n for i in range(m):\n values[i, :] = res[i][1]\n return timestamp, values\n\n def window_sample_reconstruct(\n self,\n data: np.array,\n groups: list,\n score: np.array,\n random_state: int\n ):\n \"\"\"\n :param data: 原始数据\n :param groups: 分组\n :param score: 这个窗口的每一个点的采样可信度\n :param random_state: 随机种子\n :return: 重建数据, 重建尝试次数\n \"\"\"\n # 数据量, 维度\n n, d = data.shape\n retry_count = 0\n sample_rate = self.sample_rate\n while True:\n try:\n if self.without_localize_sampling:\n if random_state:\n np.random.seed(random_state)\n timestamp = np.random.choice(\n np.arange(n),\n size=int(np.round(sample_rate * n)),\n replace=False\n )\n values = data[timestamp]\n else:\n timestamp, values = \\\n self.sample(\n data,\n int(np.round(sample_rate * n)),\n score,\n random_state\n )\n rec = np.zeros(shape=(n, d))\n for i in range(len(groups)):\n x_re = reconstruct(\n n, len(groups[i]), timestamp,\n values[:, groups[i]]\n )\n for j in range(len(groups[i])):\n rec[:, groups[i][j]] = x_re[:, j]\n break\n except SolverError:\n if retry_count > self.retry_limit:\n raise Exception(\n 'retry failed, please try higher sample rate or '\n 'window size'\n )\n sample_rate += (1 - sample_rate) / 4\n retry_count += 1\n from sys import stderr\n stderr.write(\n 'WARNING: reconstruct failed, retry with higher '\n 'sample rate %f, retry times remain %d\\n'\n % (\n sample_rate, self.retry_limit - retry_count)\n )\n return rec, retry_count\n\n\nclass CSAnomalyDetector:\n \"\"\"\n 基于压缩感知采样重建的离线多进程异常检测器\n \"\"\"\n\n def __init__(\n self,\n cluster_threshold: float,\n sample_rate: float,\n sample_score_method,\n distance,\n workers: int = 1,\n latest_windows: int = 96,\n scale: float = 5,\n rho: float = 0.1,\n sigma: float = 1 / 24,\n random_state=None,\n retry_limit=10,\n without_grouping: str = None,\n without_localize_sampling: bool = False,\n ):\n \"\"\"\n :param cluster_threshold: 聚类参数: 阈值\n :param sample_rate: 采样率\n :param sample_score_method: 采样点可信度计算函数 输入(array(n * d))输出\n array(n) 表示输入的n个点的采样可信度\n :param distance: 计算距离的函数, 输入 (array(n * d), array(n * d)) 输出\n real表示两个输入之间的距离\n :param workers: 计算线程数\n :param latest_windows: 采样时参考的历史窗口数\n :param scale: 采样参数: 等距采样点扩充倍数\n :param rho: 采样参数: 中心采样概率\n :param sigma: 采样参数: 采样集中程度\n :param random_state: 随机数种子\n :param retry_limit: 求解重试次数, 超过次数求解仍未成功, 则抛出异常\n :param without_grouping: 降级实验: 不进行分组\n :param without_localize_sampling: 降级实验: 完全随机采样\n \"\"\"\n if sample_rate > 1 or sample_rate <= 0:\n raise Exception('invalid sample rate: %s' % sample_rate)\n if without_grouping and 
without_grouping not in \\\n {'one_by_one', 'all_by_one'}:\n raise Exception('unknown without grouping option')\n self._scale = scale\n self._rho = rho\n self._sigma = sigma\n self._sample_rate = sample_rate\n self._cluster_threshold = cluster_threshold\n self._random_state = random_state\n self._latest_windows = latest_windows\n # 采样点可信度计算方法\n self._sample_score_method = sample_score_method\n # 距离计算方法\n self._distance = distance\n # 重试参数\n self._retry_limit = retry_limit\n # 最大工作线程数\n self._workers = workers\n # 降级实验\n self._without_grouping = without_grouping\n self._without_localize_sampling = without_localize_sampling\n\n def reconstruct(\n self, data: np.array,\n window: int,\n windows_per_cycle: int,\n stride: int = 1,\n ):\n \"\"\"\n 离线预测输入数据的以时间窗为单位的异常概率预测, 多线程\n :param data: 输入数据\n :param window: 时间窗口长度(点)\n :param windows_per_cycle: 周期长度: 以时间窗口为单位\n :param stride: 时间窗口步长\n \"\"\"\n if windows_per_cycle < 1:\n raise Exception('a cycle contains 1 window at least')\n # 周期长度\n cycle = windows_per_cycle * window\n # 周期特征: 按周期分组\n groups = self._get_cycle_feature(data, cycle)\n print('group per cycle:')\n for i in range(len(groups)):\n print('cycle: %d ----' % i)\n for each in groups[i]:\n print(' ', each)\n reconstructed, retry_count = self._get_reconstructed_data(\n data, window, windows_per_cycle, groups, stride)\n return reconstructed, retry_count\n\n def predict(\n self,\n data: np.array,\n reconstructed: np.array,\n window: int,\n stride: int = 1,\n ):\n \"\"\"\n 离线处理: 利用参数进行评估, 得到每个点的异常得分\n :param data: 原始数据\n :param reconstructed: 重建好的数据\n :param window: 数据窗口长度\n :param stride: 窗口步长\n :return: 每个点的异常得分\n \"\"\"\n if reconstructed.shape != data.shape:\n raise Exception('shape mismatches')\n n, d = data.shape\n # 异常得分\n anomaly_score = np.zeros((n,))\n # 表示当时某个位置上被已重建窗口的数量\n anomaly_score_weight = np.zeros((n,))\n # 窗口左端点索引\n wb = 0\n while True:\n we = min(n, wb + window)\n # 窗口右端点索引 窗口数据[wb, we)\n score = self._distance(data[wb:we], reconstructed[wb:we])\n for i in range(we - wb):\n w = i + wb\n weight = anomaly_score_weight[w]\n anomaly_score[w] = \\\n (anomaly_score[w] * weight + score) / (weight + 1)\n anomaly_score_weight[wb:we] += 1\n if we >= n:\n break\n wb += stride\n return anomaly_score\n\n def _get_reconstructed_data(\n self,\n data: np.array,\n window: int,\n windows_per_cycle: int,\n groups: list,\n stride: int,\n ):\n \"\"\"\n 离线预测输入数据的以时间窗为单位的异常概率预测, 多线程\n :param data: 输入数据\n :param window: 时间窗口长度(点)\n :param windows_per_cycle: 周期长度: 以时间窗口为单位\n :param groups: 每个周期的分组\n :param stride: 时间窗口步长\n :return:\n \"\"\"\n n, d = data.shape\n # 重建的数据\n reconstructed = np.zeros((n, d))\n # 表示当时某个位置上被已重建窗口的数量\n reconstructing_weight = np.zeros((n,))\n needed_weight = np.zeros((n,))\n # 作业列表\n task_queue = Queue()\n # 结果列表\n result_queue = Queue()\n # 周期长度\n cycle = window * windows_per_cycle\n\n # 窗口左端点索引\n win_l = 0\n while True:\n win_r = min(n, win_l + window)\n # 窗口右端点索引 窗口数据[win_l, win_r)\n task_queue.put((win_l, win_r, groups[win_l // cycle]))\n print(task_queue.qsize())\n needed_weight[win_l:win_r] += 1\n if win_r >= n:\n break\n win_l += stride\n\n task_return_event = Event()\n finished = False\n\n def receive_result_thread():\n \"\"\"\n 接受result_queue结果的线程\n :return:\n \"\"\"\n total_retries = 0\n while True:\n while result_queue.empty():\n task_return_event.clear()\n task_return_event.wait()\n if finished:\n print(total_retries)\n result_queue.put(total_retries)\n return\n wb, we, rec_window, retries, sample_score = result_queue.get()\n total_retries += retries\n 
for index in range(rec_window.shape[0]):\n w = index + wb\n weight = reconstructing_weight[w]\n reconstructed[w, :] = \\\n (reconstructed[w, :] * weight + rec_window[index]) \\\n / (weight + 1)\n reconstructing_weight[wb:we] += 1\n\n processes = []\n for i in range(self._workers):\n process = WindowReconstructProcess(\n data=data, task_queue=task_queue, result_queue=result_queue,\n cycle=cycle, latest_windows=self._latest_windows,\n sample_score_method=self._sample_score_method,\n sample_rate=self._sample_rate,\n scale=self._scale, rho=self._rho, sigma=self._sigma,\n random_state=self._random_state,\n without_localize_sampling=self._without_localize_sampling,\n retry_limit=self._retry_limit,\n task_return_event=task_return_event\n )\n process.start()\n processes.append(process)\n receiving_thread = Thread(target=receive_result_thread)\n receiving_thread.start()\n for each in processes:\n each.join()\n finished = True\n task_return_event.set()\n receiving_thread.join()\n\n mismatch_weights = []\n for i in range(n):\n if reconstructing_weight[i] != needed_weight[i]:\n mismatch_weights.append('%d' % i)\n if len(mismatch_weights):\n from sys import stderr\n stderr.write('BUG empty weight: index: %s\\n' %\n ','.join(mismatch_weights))\n return reconstructed, result_queue.get()\n\n def _get_cycle_feature(\n self,\n data: np.array,\n cycle: int,\n ):\n \"\"\"\n 将数据按周期进行划分后计算得到每个周期的分组\n :param data: 数据\n :param cycle: 周期长度\n :return: 分组结果\n \"\"\"\n # 数据量, 维度\n n, d = data.shape\n # 每周期分组结果\n cycle_groups = []\n # 工作数量\n group_index = 0\n # 作业队列, 用于向子进程输入数据\n task_queue = Queue()\n # 输出队列\n result_queue = Queue()\n # 周期开始的index\n cb = 0\n while cb < n:\n # 周期结束的index\n ce = min(n, cb + cycle) # 一周期数据为data[cb, ce)\n # 初始化追加列表引用\n if group_index == 0:\n # 没有历史数据\n # 分组默认每个kpi一组\n init_group = []\n if not self._without_grouping:\n for i in range(d):\n init_group.append([i])\n cycle_groups.append(init_group)\n else:\n cycle_groups.append([])\n # 向工作队列中填充输入数据\n if not self._without_grouping:\n task_queue.put((group_index, data[cb:ce]))\n group_index += 1\n cb += cycle\n if self._without_grouping:\n if self._without_grouping == 'one_by_one':\n # 每条kpi一组\n for each in cycle_groups:\n for i in range(d):\n each.append([i])\n elif self._without_grouping == 'all_by_one':\n # 所有kpi一组\n all_in_group = []\n for i in range(d):\n all_in_group.append(i)\n for each in cycle_groups:\n each.append(all_in_group)\n else:\n processes = []\n for i in range(min(len(cycle_groups), self._workers)):\n process = CycleFeatureProcess(\n task_queue, result_queue, self._cluster_threshold\n )\n process.start()\n processes.append(process)\n for process in processes:\n process.join()\n while not result_queue.empty():\n group_index, group = result_queue.get()\n cycle_groups[group_index] = group\n return cycle_groups\n" ]
[ [ "numpy.random.seed", "numpy.arange", "numpy.round", "numpy.array", "numpy.mat", "numpy.zeros" ] ]
MBasting/SRGAN
[ "29b70557b940f5e73ecbb917c0b92176245e9c13" ]
[ "Discriminator.py" ]
[ "import torch.nn as nn\n\n\nclass DiscriminatorBlock(nn.Module):\n\n def __init__(self, in_channels, hidden_channel, kernel, stride, alpha=0.2):\n super(DiscriminatorBlock, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels, hidden_channel, kernel, stride)\n self.batchnorm = nn.BatchNorm2d(hidden_channel)\n self.lrelu = nn.LeakyReLU(alpha)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.batchnorm(x)\n x = self.lrelu(x)\n return x\n\n\nclass Discriminator(nn.Module):\n\n def __init__(self, in_channels, hidden_channels=None, stride=None, kernel_size=3, alpha=0.2):\n\n super(Discriminator, self).__init__()\n\n if stride is None:\n stride = [1, 2, 1, 2, 1, 2, 1, 2]\n if hidden_channels is None:\n hidden_channels = [64, 64, 128, 128, 256, 256, 512, 512]\n self.conv1 = nn.Conv2d(in_channels, hidden_channels[0], kernel_size, stride[0])\n self.lrelu = nn.LeakyReLU(alpha)\n\n self.disc_blocks = nn.ModuleList(\n [DiscriminatorBlock(hidden_channels[i - 1], hidden_channels[i], kernel_size, stride[i]) for i in\n range(1, len(hidden_channels))])\n\n self.flatten = nn.Flatten()\n self.fc1 = nn.Linear(41472, 1024)\n\n self.fc2 = nn.Linear(1024, 1)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n \"\"\"\n Forward pass\n :param x: Input\n :return: Output of forward pass\n \"\"\"\n x = self.lrelu(self.conv1(x))\n # Loop through all discriminator blocks\n for layer in self.disc_blocks:\n x = layer(x)\n x = self.flatten(x)\n x = self.lrelu(self.fc1(x))\n pred = self.sigmoid(self.fc2(x))\n return pred\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.Flatten", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d" ] ]
Hoon1029/jeju-bus-stations-clustering
[ "c227ea5e8ddb9cfae4bc005df560423eebd8d23b" ]
[ "lib/work.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom multiprocessing import Pool\n\ndef parallelize(func, df, core = 8):\n df_split = np.array_split(df, core)\n pool = Pool(core)\n df = pd.concat(pool.map(func, df_split))\n pool.close()\n pool.join()\n return df" ]
[ [ "numpy.array_split" ] ]
RuohanW/il_baseline_fork
[ "824d5117e573292e707d648b087f1e10253cc8d6" ]
[ "irl/util.py" ]
[ "import torch\nimport numpy as np\nfrom collections.abc import Iterable\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport logging, os, sys\n\nfrom torch import Tensor\n\n\nclass LabelSmoothing(torch.nn.Module):\n \"\"\"\n NLL loss with label smoothing.\n \"\"\"\n def __init__(self, smoothing=0.0):\n \"\"\"\n Constructor for the LabelSmoothing module.\n :param smoothing: label smoothing factor\n \"\"\"\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n\n def forward(self, x, target):\n logprobs = torch.nn.functional.log_softmax(x, dim=-1)\n\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n return loss.mean()\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n if not isinstance(val, Iterable):\n val = [val]\n\n val = np.asarray(val)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(epoch, opt, optimizer):\n \"\"\"Sets the learning rate to the initial LR decayed by decay rate every steep step\"\"\"\n steps = np.sum(epoch >= np.asarray(opt.lr_decay_epochs))\n if steps > 0:\n new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n idx = torch.from_numpy(np.asarray(topk) - 1)\n return correct.cumsum(0).sum(1)[idx] * 100.0 / batch_size\n\ndef to_cuda_maybe(obj):\n if torch.cuda.is_available():\n if isinstance(obj, Iterable) and (not isinstance(obj, Tensor)):\n return [e.cuda() for e in obj]\n else:\n return obj.cuda()\n return obj\n\n\ndef freeze_bn(module):\n for child in module.modules():\n if isinstance(child, torch.nn.BatchNorm2d):\n child.eval()\n\n\ndef similarity(cuda_arr):\n tmp = cuda_arr.detach().cpu().t().numpy()\n print(cosine_similarity(tmp))\n print(\"------------------\")\n\n\ndef cuda_to_np(arr, dtype=np.float64):\n if isinstance(arr, torch.Tensor):\n return arr.detach().cpu().numpy().astype(dtype)\n return arr.astype(dtype)\n\n\ndef to_np_maybe(obj):\n if isinstance(obj, Iterable):\n return [cuda_to_np(e) for e in obj]\n return cuda_to_np(obj)\n\n\ndef print_metrics(names, vals):\n ret = \"\"\n for key, val in zip(names, vals):\n ret += f\"{key}: {val} \"\n return ret\n\n\ndef np_to_cuda(arr):\n if isinstance(arr, np.ndarray):\n return torch.from_numpy(arr.astype(np.float32)).cuda()\n\n return arr\n\n\ndef partial_reload(model, state_dict):\n cur_dict = model.state_dict()\n partial_dict = {}\n for k, v in state_dict.items():\n if k in cur_dict and cur_dict[k].shape == v.shape:\n partial_dict[k] = v\n cur_dict.update(partial_dict)\n model.load_state_dict(cur_dict)\n\n\ndef get_logger(name, dir=\"logs/\", file_name=None, log_level=logging.INFO):\n #local machine, no file output\n logger = logging.getLogger(name)\n\n c_handler = logging.StreamHandler(stream=sys.stdout)\n 
c_handler.setLevel(log_level)\n c_format = logging.Formatter('%(message)s')\n c_handler.setFormatter(c_format)\n logger.addHandler(c_handler)\n\n file_path = os.path.join(dir, file_name)\n f_handler = logging.FileHandler(file_path)\n # f_handler.setLevel(log_level)\n f_format = logging.Formatter('%(asctime)s | %(message)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n\n logger.setLevel(log_level)\n return logger\n\n\ndef save_routine(epoch, model, optimizer, save_path):\n state = {\n 'epoch': epoch,\n 'model': model.state_dict(),\n 'optim': optimizer.state_dict(),\n }\n torch.save(state, save_path)\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.nn.functional.log_softmax", "numpy.asarray", "sklearn.metrics.pairwise.cosine_similarity", "torch.no_grad", "torch.cuda.is_available", "torch.save" ] ]
fishial/Object-Detection-Model
[ "4792f65ea785156a8e240d9cdbbc0c9d013ea0bb" ]
[ "train_scripts/classification/auto_train_cross.py" ]
[ "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\n# Change path specificly to your directories\nsys.path.insert(1, '/home/codahead/Fishial/FishialReaserch')\n\nfrom module.classification_package.src.model import FcNet\nfrom module.classification_package.src.model import Backbone\nfrom module.classification_package.src.train import train\n\nimport torch\nimport torchvision.models as models\nimport logging\n\nfrom apex import amp\nfrom PIL import Image\n\nfrom module.classification_package.src.utils import find_device\nfrom module.classification_package.src.utils import WarmupCosineSchedule\nfrom module.classification_package.src.dataset import BalancedBatchSampler\nfrom module.classification_package.src.dataset import FishialDataset\n\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\n\nfrom torchvision import transforms\n\nlogger = logging.getLogger(__name__)\n\n# Setup logging\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\n\nwriter = SummaryWriter('output/fashion_mnist_experiment_1')\ndevice = find_device()\n\nn_classes_per_batch = 2\nn_samples_per_class = 5\n\nds_train = FishialDataset(\n json_path=\"data_train.json\",\n root_folder=\"/home/codahead/Fishial/FishialReaserch/datasets/cutted_v2.5/data_set\",\n transform=transforms.Compose([transforms.Resize((224, 224), Image.BILINEAR),\n transforms.TrivialAugmentWide(),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.RandomErasing(p=0.358, scale=(0.05, 0.4), ratio=(0.05, 6.1), value=0,\n inplace=False),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n)\n\nds_val = FishialDataset(\n json_path=\"data_test.json\",\n root_folder=\"/home/codahead/Fishial/FishialReaserch/datasets/cutted_v2.5/data_set\",\n transform=transforms.Compose([\n # NewPad(),\n transforms.Resize([224, 224]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n)\n\nbalanced_batch_sampler_ds_train = BalancedBatchSampler(ds_train, n_classes_per_batch, n_samples_per_class)\n\ndata_loader_train = DataLoader(ds_train, batch_sampler=balanced_batch_sampler_ds_train,\n num_workers=2,\n pin_memory=True)\n\nckp = None\nn_classes = ds_val.n_classes\n\nresnet18 = models.resnet18(pretrained=True)\nresnet18.fc = nn.Identity()\n\nbackbone = Backbone(resnet18)\nmodel = FcNet(backbone, n_classes)\nif ckp:\n model.load_state_dict(torch.load(ckp))\nmodel.to(device)\n\nloss_fn = nn.CrossEntropyLoss()\n# opt = Adadelta(model.parameters(), lr=0.001)\nopt = torch.optim.SGD(model.parameters(),\n lr=3e-2,\n momentum=0.9,\n weight_decay=0)\nepoch = 800\nsteps = len(data_loader_train) * epoch\nscheduler = WarmupCosineSchedule(opt, warmup_steps=500, t_total=steps)\nmodel, opt = amp.initialize(models=model,\n optimizers=opt,\n opt_level='O2')\namp._amp_state.loss_scalers[0]._loss_scale = 2 ** 20\n# Convenient methods in order of verbosity from highest to lowest\n# train(scheduler, steps, opt, model, data_loader_train, ds_val, device, ['at_k'], loss_fn, logging, eval_every=len(data_loader_train))\ntrain(scheduler, steps, opt, model, data_loader_train, ds_val, device, ['accuracy'], loss_fn, logger, eval_every=100)" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.load", "torch.utils.data.DataLoader", "torch.nn.Identity", "torch.utils.tensorboard.SummaryWriter" ] ]
dot9aitclass/pytorch-CycleGAN-and-pix2pix
[ "e977cf9dd2f934097e2ebc403791fa30b6028448" ]
[ "models/pix2pix_model.py" ]
[ "import torch\nfrom .base_model import BaseModel\nfrom . import networks\n\n\nclass Pix2PixModel(BaseModel):\n \"\"\" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.\n\n The model training requires '--dataset_mode aligned' dataset.\n By default, it uses a '--netG unet256' U-Net generator,\n a '--netD basic' discriminator (PatchGAN),\n and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).\n\n pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf\n \"\"\"\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For pix2pix, we do not use image buffer\n The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1\n By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.\n \"\"\"\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize the pix2pix class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['fake_B']\n # specify the models you want to save to the disk. 
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n #self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n \n def set_input_vid(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n self.real_A = input['A'].to(self.device)\n #self.real_B = input['B' if AtoB else 'A'].to(self.device)\n\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n #print(type(self.real_A))\n self.fake_B = self.netG(self.real_A) # G(A)\n\n def backward_D(self):\n \"\"\"Calculate GAN loss for the discriminator\"\"\"\n # Fake; stop backprop to the generator by detaching fake_B\n fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n pred_fake = self.netD(fake_AB.detach())\n self.loss_D_fake = self.criterionGAN(pred_fake, False)\n # Real\n real_AB = torch.cat((self.real_A, self.real_B), 1)\n pred_real = self.netD(real_AB)\n self.loss_D_real = self.criterionGAN(pred_real, True)\n # combine loss and calculate gradients\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n self.loss_D.backward()\n\n def backward_G(self):\n \"\"\"Calculate GAN and L1 loss for the generator\"\"\"\n # First, G(A) should fake the discriminator\n fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n pred_fake = self.netD(fake_AB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n # Second, G(A) = B\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n self.loss_G.backward()\n\n def 
optimize_parameters(self):\n self.forward() # compute fake images: G(A)\n # update D\n self.set_requires_grad(self.netD, True) # enable backprop for D\n self.optimizer_D.zero_grad() # set D's gradients to zero\n self.backward_D() # calculate gradients for D\n self.optimizer_D.step() # update D's weights\n # update G\n self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G\n self.optimizer_G.zero_grad() # set G's gradients to zero\n self.backward_G() # calculate graidents for G\n self.optimizer_G.step() # udpate G's weights\n" ]
[ [ "torch.nn.L1Loss", "torch.cat" ] ]
tcliang-tw/meda-env
[ "e0ba12a5ae85ea98d5a4dee9d21bb9c51510a675" ]
[ "meda_env/evaluate.py" ]
[ "#!/usr/bin/python\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom pathlib import Path\nimport argparse\nimport time\nimport tensorflow as tf\nfrom stable_baselines.common import make_vec_env\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.evaluation import evaluate_policy\nfrom stable_baselines import PPO2, ACER, DQN\nfrom meda import*\nfrom my_net import VggCnnPolicy, DqnVggCnnPolicy\nfrom utilities import DecentrailizedTrainer, CentralizedEnv, ParaSharingEnv, ConcurrentAgentEnv\nimport csv\n\nALGOS = {'PPO': PPO2, 'ACER': ACER, 'DQN': DQN}\n\ndef showIsGPU():\n if tf.test.is_gpu_available():\n print(\"### Training on GPUs... ###\")\n else:\n print(\"### Training on CPUs... ###\")\n\ndef EvaluateAgent(args, env, obs, agent, centralized = True):\n if centralized:\n env = CentralizedEnv(env)\n obs = env.restart()\n episode_reward = 0.0\n done, state = False, None\n step = 0\n while not done:\n if centralized: # centralized multi-agent\n action, state = agent.predict(obs)\n obs, reward, done, _info = env.step(action) # env: CentralizedEnv\n else: # concurrent, para-sharing\n action = {}\n for droplet in env.agents:\n model = agent[droplet]\n obs_droplet = obs[droplet]\n action[droplet], _ = model.predict(obs_droplet)\n obs, m_rewards, m_dones, _info = env.step(action) # env: Meda, obs: m_obs\n reward = np.average([r for r in m_rewards.values()])\n done = bool(np.all([d for d in m_dones.values()]))\n step+=1\n episode_reward += reward\n return episode_reward\n\ndef evaluateOnce(args, path_log, env, repeat_num):\n algo = ALGOS[args.algo]\n len_results = (args.stop_iters - args.start_iters)//5 + 1\n results = {'baseline': [0]*len_results, 'single': [0]*len_results, 'multi': [0]*len_results}\n for i in range(len_results):\n print('### Evaluating iteration %d' %(i*5))\n model_name = '_'.join(['repeat', str(repeat_num), 'training', str(i*5), str(args.n_timesteps)])\n path_multi = os.path.join(path_log, model_name)\n if args.method == 'centralized':\n multi_agent = algo.load(path_multi)\n else:\n multi_agent = {}\n for agent_index, agent in enumerate(env.agents):\n if args.method == 'concurrent':\n multi_agent[agent] = algo.load(path_multi+'_c{}'.format(agent_index))\n else:\n multi_agent[agent] = algo.load(path_multi+'shared')\n baseline_agent = BaseLineRouter(args.width, args.length)\n for j in range(args.n_evaluate):\n if j%5 == 0:\n print('### Episode %d.'%j)\n obs = env.reset()\n routing_manager = env.routing_manager\n results['baseline'][i] += baseline_agent.getEstimatedReward(routing_manager)[0]\n results['multi'][i] += EvaluateAgent(args, env, obs, multi_agent, args.method == 'centralized')\n results['baseline'][i] /= args.n_evaluate\n results['multi'][i] /= args.n_evaluate\n return results\n\ndef save_evaluation(agent_rewards, filename, path_log):\n with open(os.path.join(path_log, filename), 'w') as agent_log:\n writer_agent = csv.writer(agent_log)\n writer_agent.writerows(agent_rewards)\n\ndef evaluateSeveralTimes(args=None, path_log=None):\n showIsGPU()\n multi_rewards, single_rewards, baseline_rewards = [], [], []\n for repeat in range(1, args.n_repeat+1):\n print(\"### In repeat %d\" %(repeat))\n start_time = time.time()\n env = MEDAEnv(w=args.width, l=args.length, n_agents=args.n_agents,\n b_degrade=args.b_degrade, per_degrade = args.per_degrade)\n results = evaluateOnce(args, path_log, env, repeat_num=repeat)\n print(\"### Repeat %s costs %s seconds ###\" %(str(repeat), 
time.time() - start_time))\n multi_rewards.append(results['multi'])\n single_rewards.append(results['single'])\n baseline_rewards.append(results['baseline'])\n save_evaluation(multi_rewards, 'multi_rewards.csv', path_log)\n save_evaluation(single_rewards, 'single_rewards.csv', path_log)\n save_evaluation(baseline_rewards, 'baseline_rewards.csv', path_log)\n\ndef get_parser():\n \"\"\"\n Creates an argument parser.\n \"\"\"\n parser = argparse.ArgumentParser(description='RL training for MEDA')\n # device\n parser.add_argument('--cuda', help='CUDA Visible devices', default='0', type=str, required=False)\n parser.add_argument('--algo', help='RL Algorithm', default='PPO', type=str, required=False, choices=list(ALGOS.keys()))\n # rl training\n parser.add_argument('--method', help='The method use for rl training (centralized, sharing, concurrent)',\n type=str, default='concurrent', choices=['centralized', 'sharing', 'concurrent'])\n parser.add_argument('--n-repeat', help='Number of repeats for the experiment', type=int, default=3)\n parser.add_argument('--start-iters', help='Number of iterations the initialized model has been trained',\n type=int, default=0)\n parser.add_argument('--stop-iters', help='Total number of iterations (including pre-train) for one repeat of the experiment',\n type=int, default=100)\n parser.add_argument('--n-timesteps', help='Number of timesteps for each iteration',\n type=int, default=20000)\n # env settings\n parser.add_argument('--width', help='Width of the biochip', type = int, default = 30)\n parser.add_argument('--length', help='Length of the biochip', type = int, default = 60)\n parser.add_argument('--n-agents', help='Number of agents', type = int, default = 2)\n parser.add_argument('--b-degrade', action = \"store_true\")\n parser.add_argument('--per-degrade', help='Percentage of degrade', type = float, default = 0)\n # rl evaluate\n parser.add_argument('--n-evaluate', help='Number of episodes to evaluate the model for each iteration',\n type=int, default=20)\n return parser\n\ndef main(args=None):\n parser = get_parser()\n args = parser.parse_args(args)\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.cuda\n # the path to where log files will be saved\n # example path: log/30_60/PPO_SimpleCnnPolicy\n path_log = os.path.join('log', args.method, str(args.width)+'_'+str(args.length),\n str(args.n_agents), args.algo+'_VggCnnPolicy')\n print('### Start evaluating algorithm %s'%(args.algo))\n evaluateSeveralTimes(args, path_log = path_log)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.test.is_gpu_available" ] ]
jiangxiaoshuaiya/BertHub
[ "41eeba73c89ecf2ed2b77616c2aa40c7795495dd" ]
[ "example/haihua/ToyBert/utils.py" ]
[ "#!/usr/local/Cellar/python/3.7.4_1\n# -*- coding: utf-8 -*-\n# @File : utils.py\n# @Author : 姜小帅\n# @Moto : 良好的阶段性收获是坚持的重要动力之一\n# @Contract: [email protected]\nimport os\nimport json\nimport torch\nimport random\nimport numpy as np\nimport datetime\n\n\n# input examples for multiple choice reading comprehension\nclass InputExample(object):\n '''\n @Parameters\n Id: question id, a paraph maybe contains tow questions,\n one question corresponds to an instance of corresponding\n context: related paragraphs for answering questions\n pair: consist of a question and an answer, \n form likes: question + ' ' + answer(just one choice)\n label: correct answer for question\n '''\n def __init__(self, Id, context, pair, label=-1):\n self.Id = Id\n self.context = context\n self.pair = pair\n self.label = label\n\n# input feature for multiple choice reading comprehension\n# tokenization make InputExample become InputFeature \nclass InputFeature(object):\n '''\n @Parameters\n example_id: question id, a paraph maybe contains tow questions,\n one question corresponds to an instance of corresponding\n choices_features: input ids, attention_mask and token_type_ids of each example\n label: correct answer for question\n '''\n def __init__(self, example_id, choices_features, label):\n self.example_id = example_id\n self.choices_features = [{\n 'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'token_type_ids': token_type_ids\n }\n for input_ids, attention_mask, token_type_ids in choices_features\n ]\n self.label = label\n\ndef select_field(features, field):\n return [[choice[field] for choice in feature.choices_features] for feature in features]\n\n\n# a defined class of model output\nclass ModelOutput(object):\n def __init__(self, loss, logits, hidden_states=None, attentions=None):\n self.loss = loss\n self.logits = logits\n self.hidden_states = hidden_states\n self.attentions = attentions\n\n\n\ndef load_data(path, train_test):\n \n with open(path + '{}.json'.format(train_test)) as json_file:\n data = json.load(json_file)\n\n option = ['A', 'B', 'C', 'D']\n label_map = {label: idx for idx, label in enumerate(option)}\n\n examples = []\n for item in data:\n question = None\n answer = None\n choice = None\n Id = item['ID']\n context = item['Content']\n for qa in item['Questions']:\n Id = qa['Q_id']\n answer = label_map[qa['Answer']] if len(qa)==4 is not None else -1\n question = qa['Question']\n choice = qa['Choices']\n \n examples.append(\n InputExample(\n Id=int(Id),\n context=[context for i in range(len(choice))],\n pair=[question + ' ' + i[2:] for i in choice],\n label=answer\n )\n )\n \n return examples\n\n\ndef format_time(elapsed):\n elapsed_rounded = int(round((elapsed)))\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\n\ndef fix_seed(seed):\n '''\n This funcation help you to fix seed in train, \n that means you could get same results in sevral times\n '''\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\n" ]
[ [ "torch.manual_seed", "torch.cuda.manual_seed", "numpy.random.seed" ] ]
MyrrhDev/SocioMark
[ "dd2f61a17f999b203581919349becd20e4f1f1b5" ]
[ "backend/app/server/controllers/post.py" ]
[ "import cv2\nimport numpy as np\nimport tempfile\nfrom PIL import Image\nfrom bson.objectid import ObjectId\nfrom fastapi import HTTPException\nfrom ..database import posts_collection, users_collection\nfrom .like import get_all_likes_on_post\nfrom .comment import get_all_comments_on_post\nfrom .steganography.hash import hash_sha_info\nfrom .upload import upload_image_path, upload_image\nfrom .verify import encode_image, decode_image\n\n# helpers\n\n\ndef post_helper(post, user, likes=None, comments=None) -> dict:\n if not likes:\n likes = []\n if not comments:\n comments = []\n return {\n \"post_id\": str(post[\"_id\"]),\n \"user_id\": str(user[\"_id\"]),\n \"author_name\": user[\"name\"],\n \"author_image\": user[\"profile_picture\"],\n \"report_counter\": post[\"report_counter\"],\n \"likes\": likes,\n \"comments\": comments,\n \"image\": post[\"image\"],\n \"description\": post[\"description\"]\n }\n\n\nasync def initialize_post(user_id: ObjectId, post: dict, image_url: str, user_sha: str):\n post[\"image\"] = image_url\n post[\"user_sha\"] = user_sha\n post[\"user_id\"] = user_id\n return post\n\n\n# Add a new post into to the database\nasync def add_post(email: str, post_data: dict) -> dict:\n user = await users_collection.find_one({\"email\": email})\n upload_file = post_data[\"image\"]\n\n image_url = upload_image(upload_file)\n\n pil_image = Image.open(upload_file.file)\n rgb_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)\n\n embedded_sha = await decode_image(rgb_image)\n\n sha_already_exists = await posts_collection.find_one({\"user_sha\": embedded_sha})\n\n if not sha_already_exists:\n user_sha, info = hash_sha_info(str(user[\"_id\"]))\n encoded_image = await encode_image(rgb_image, info)\n\n # upload the image\n with tempfile.NamedTemporaryFile(delete=False, suffix=\".png\") as tmp:\n cv2.imwrite(tmp.name, encoded_image)\n image_url = upload_image_path(tmp.name)\n\n else:\n user_sha = sha_already_exists[\"user_sha\"]\n\n # delete file\n del post_data[\"image\"]\n\n post_data = await initialize_post(user[\"_id\"], post_data, image_url, user_sha)\n post = await posts_collection.insert_one(post_data)\n new_post = await posts_collection.find_one({\"_id\": post.inserted_id})\n return post_helper(new_post, user)\n\n\n# Retrieve all posts with user id present in the database\nasync def retrieve_posts(user_id: ObjectId):\n posts = []\n user = await users_collection.find_one({\"_id\": ObjectId(user_id)})\n posts_by_user = posts_collection.find({\"user_id\": ObjectId(user_id)})\n async for post in posts_by_user:\n likes_on_post = await get_all_likes_on_post(post[\"_id\"])\n comments_on_post = await get_all_comments_on_post(post[\"_id\"])\n posts.append(post_helper(post, user, likes_on_post, comments_on_post))\n return posts\n\n\n# Retrieve all posts from the database\nasync def retrieve_all_posts():\n posts = []\n async for post in posts_collection.find():\n user = await users_collection.find_one({\"_id\": post[\"user_id\"]})\n likes_on_post = await get_all_likes_on_post(post[\"_id\"])\n comments_on_post = await get_all_comments_on_post(post[\"_id\"])\n posts.append(post_helper(post, user, likes_on_post, comments_on_post))\n return posts\n\n\n# Retrieve a post with a matching ID\nasync def retrieve_post(post_id: str) -> dict:\n post = await posts_collection.find_one({\"_id\": ObjectId(post_id)})\n if post:\n user = await users_collection.find_one({\"_id\": post[\"user_id\"]})\n likes_on_post = await get_all_likes_on_post(post[\"_id\"])\n comments_on_post = await 
get_all_comments_on_post(post[\"_id\"])\n return post_helper(post, user, likes_on_post, comments_on_post)\n\n\n# Update a post with a matching ID\nasync def update_post(email: str, post_id: str, data: dict):\n # Return false if an empty request body is sent.\n if len(data) < 1:\n return False\n user = await users_collection.find_one({\"email\": email})\n post = await posts_collection.find_one({\"_id\": ObjectId(post_id)})\n if post and user[\"_id\"] == post[\"user_id\"]:\n updated_post = await posts_collection.update_one(\n {\"_id\": ObjectId(post_id)}, {\"$set\": data}\n )\n if updated_post:\n return True\n return False\n else:\n raise HTTPException(status_code=501, detail='Something went wrong, try again.')\n\n\n# Report a post with a matching ID\nasync def report_post(id: str):\n post = await posts_collection.find_one({\"_id\": ObjectId(id)})\n if post:\n current_counter = post[\"report_counter\"]\n updated_post = await posts_collection.update_one(\n {\"_id\": ObjectId(id)}, {\"$set\": {\"report_counter\": current_counter + 1}}\n )\n if updated_post:\n return True\n return False\n\n\n# Delete a post from the database\nasync def delete_post(email: str, id: str):\n user = await users_collection.find_one({\"email\": email})\n post = await posts_collection.find_one({\"_id\": ObjectId(id)})\n if post and user[\"_id\"] == post[\"user_id\"]:\n await posts_collection.delete_one({\"_id\": ObjectId(id)})\n return True\n else:\n raise HTTPException(status_code=403, detail='User not authorized.')\n" ]
[ [ "numpy.array" ] ]
FZJ-IEK3-VSA/HiSim
[ "e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2" ]
[ "hisim/components/generic_heat_pump.py" ]
[ "# Generic/Built-in\nimport numpy as np\nimport copy\nimport matplotlib\nimport seaborn\nfrom math import pi\n\n# Owned\nimport hisim.utils as utils\nfrom hisim.components.weather import Weather\nfrom hisim import component as cp\nfrom hisim.loadtypes import LoadTypes, Units\nfrom hisim.simulationparameters import SimulationParameters\n#from hisim.components.extended_storage import WaterSlice\n#from hisim.components.configuration import WarmWaterStorageConfig\n#from hisim.components.configuration import PhysicsConfig\nfrom hisim.components.building import Building\nfrom hisim.components.weather import Weather\nfrom hisim import log\n\nseaborn.set(style='ticks')\nfont = {'family' : 'normal',\n 'size' : 24}\n\nmatplotlib.rc('font', **font)\n\n__authors__ = \"Vitor Hugo Bellotto Zago\"\n__copyright__ = \"Copyright 2021, the House Infrastructure Project\"\n__credits__ = [\"Noah Pflugradt\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Vitor Hugo Bellotto Zago\"\n__email__ = \"[email protected]\"\n__status__ = \"development\"\n\nclass HeatPumpState:\n\n def __init__(self,\n start_timestep=None,\n thermal_energy_delivered = 0.0,\n cop = 1,\n cycle_number=None):\n self.start_timestep=start_timestep\n self.thermal_energy_delivered = thermal_energy_delivered\n self.cycle_number = cycle_number\n if thermal_energy_delivered == 0.0:\n self.activation = 0\n self.heating = 0\n self.cooling = 0\n self.cop = 1\n self.electricity_in = abs(self.thermal_energy_delivered / self.cop)\n elif self.thermal_energy_delivered > 0.0:\n self.activation = -1\n self.heating = self.thermal_energy_delivered\n self.cooling = 0\n self.cop = cop\n self.electricity_in = abs(self.thermal_energy_delivered / self.cop)\n elif self.thermal_energy_delivered < 0.0:\n self.activation = 1\n self.heating = 0\n self.cooling = self.thermal_energy_delivered\n self.cop = cop\n self.electricity_in = abs(self.thermal_energy_delivered / self.cop)\n else:\n raise Exception(\"Impossible Heat Pump State.\")\n \n def clone(self):\n return HeatPumpState(self.start_timestep, self.thermal_energy_delivered, self.cop, self.cycle_number)\n\nclass HeatPump(cp.Component):\n \"\"\"\n Heat pump implementation. It does support a\n refrigeration cycle. Thermal output is delivered straight to\n the component object.\n\n Parameters\n ----------\n manufacturer : str\n Heat pump manufacturer\n name : str\n Heat pump model\n min_operation_time : int, optional\n Minimum time duration that the heat pump operates under one cycle, in seconds. The default is 3600.\n min_idle_time : int, optional\n Minimum time duration that the heat pump has to stay idle, in seconds. The default is 900.\n \"\"\"\n # Inputs\n State = \"State\"\n TemperatureOutside = \"TemperatureOutside\"\n WaterConsumption = \"WaterConsumption\"\n WaterInput_mass = \"WaterInput_mass\" # kg/s\n WaterInput_temperature = \"WaterInput_temperature\" # °C\n\n # Outputs\n #WaterOutput_mass = \"WaterOutput_mass\" # kg/s\n #WaterOutput_temperature = \"WaterOutput_temperature\" # °C\n #WastedEnergyMaxTemperature = \"Wasted Energy Max Temperature\" # W\n\n ThermalEnergyDelivered = \"ThermalEnergyDelivered\"\n Heating = \"Heating\"\n Cooling = \"Cooling\"\n ElectricityOutput = \"ElectricityOutput\"\n NumberOfCycles = \"NumberOfCycles\"\n\n # Similar components to connect to:\n # 1. Weather\n # 2. 
HeatPumpController\n @utils.measure_execution_time\n def __init__(self,\n my_simulation_parameters: SimulationParameters,\n manufacturer : str = \"Viessmann Werke GmbH & Co KG\",\n name : str =\"Vitocal 300-A AWO-AC 301.B07\",\n min_operation_time : int = 60 * 60,\n min_idle_time : int = 15 * 60):\n super().__init__(\"HeatPump\", my_simulation_parameters=my_simulation_parameters)\n\n self.build(manufacturer, name, min_operation_time, min_idle_time)\n\n self.number_of_cycles = 0\n self.number_of_cycles_previous = copy.deepcopy(self.number_of_cycles)\n self.state = HeatPumpState(start_timestep=int(0),cycle_number=0)\n self.previous_state = self.state.clone( )\n\n\n\n # Inputs - Mandatories\n self.stateC: cp.ComponentInput = self.add_input(self.ComponentName,\n self.State,\n LoadTypes.Any,\n Units.Any,\n True)\n self.t_outC: cp.ComponentInput = self.add_input(self.ComponentName,\n self.TemperatureOutside,\n LoadTypes.Any,\n Units.Celsius,\n True)\n # Inputs - Not Mandatories\n self.water_loadC: cp.ComponentInput = self.add_input(self.ComponentName,\n self.WaterConsumption,\n LoadTypes.Volume,\n Units.Liter,\n False)\n self.water_input_mass: cp.ComponentInput = self.add_input(self.ComponentName,\n self.WaterInput_mass,\n LoadTypes.WarmWater,\n Units.kg_per_sec,\n False)\n self.water_input_temperature: cp.ComponentInput = self.add_input(self.ComponentName,\n self.WaterInput_temperature,\n LoadTypes.WarmWater,\n Units.Celsius,\n False)\n\n\n\n # Outputs\n #self.water_output_mass: cp.ComponentOutput = self.add_output(self.ComponentName,\n # self.WaterOutput_mass,\n # LoadTypes.WarmWater,\n # Units.kg_per_sec)\n #self.water_output_temperature: cp.ComponentOutput = self.add_output(self.ComponentName,\n # self.WaterOutput_temperature,\n #ä LoadTypes.WarmWater,\n # Units.Celsius)\n #self.wasted_energy_max_temperature: cp.ComponentOutput = self.add_output(self.ComponentName,\n # self.WastedEnergyMaxTemperature,\n # LoadTypes.WarmWater,\n # Units.Watt)\n\n self.thermal_energy_deliveredC: cp.ComponentOutput = self.add_output(self.ComponentName,\n self.ThermalEnergyDelivered,\n LoadTypes.Heating,\n Units.Watt)\n\n self.heatingC: cp.ComponentOutput = self.add_output(self.ComponentName,\n self.Heating,\n LoadTypes.Heating,\n Units.Watt)\n\n self.coolingC: cp.ComponentOutput = self.add_output(self.ComponentName,\n self.Cooling,\n LoadTypes.Cooling,\n Units.Watt)\n\n self.electricity_outputC: cp.ComponentOutput = self.add_output(self.ComponentName,\n self.ElectricityOutput,\n LoadTypes.Electricity,\n Units.Watt)\n\n self.number_of_cyclesC : cp.ComponentOutput = self.add_output(self.ComponentName,\n self.NumberOfCycles,\n LoadTypes.Any,\n Units.Any)\n \n self.add_default_connections( Weather, self.get_weather_default_connections( ) )\n self.add_default_connections( HeatPumpController, self.get_controller_default_connections( ) )\n \n def get_weather_default_connections( self ):\n log.information(\"setting weather default connections in HeatPump\")\n connections = [ ]\n weather_classname = Weather.get_classname( )\n connections.append( cp.ComponentConnection( HeatPump.TemperatureOutside, weather_classname, Weather.TemperatureOutside ) )\n return connections\n \n def get_controller_default_connections( self ):\n log.information(\"setting controller default connections in HeatPump\")\n connections = [ ]\n controller_classname = HeatPumpController.get_classname( )\n connections.append( cp.ComponentConnection( HeatPump.State, controller_classname, HeatPumpController.State ) )\n return connections\n\n def build( 
self, manufacturer, name, min_operation_time, min_idle_time ):\n # Simulation parameters\n\n # Retrieves heat pump from database - BEGIN\n heat_pumps_database = utils.load_smart_appliance(\"Heat Pump\")\n\n heat_pump_found = False\n for heat_pump in heat_pumps_database:\n if heat_pump[\"Manufacturer\"] == manufacturer and heat_pump[\"Name\"] == name:\n heat_pump_found = True\n break\n\n if heat_pump_found == False:\n raise Exception(\"Heat pump model not registered in the database\")\n\n # Interpolates COP data from the database\n self.cop_ref = []\n self.t_out_ref = []\n for heat_pump_cops in heat_pump['COP']:\n self.t_out_ref.append(float([*heat_pump_cops][0][1:].split(\"/\")[0]))\n self.cop_ref.append(float([*heat_pump_cops.values()][0]))\n self.cop_coef = np.polyfit(self.t_out_ref, self.cop_ref, 1)\n\n self.max_heating_power = heat_pump['Nominal Heating Power A2/35'] * 1E3\n #self.max_heating_power = 11 * 1E3\n self.max_cooling_power = - self.max_heating_power\n # Retrieves heat pump from database - END\n\n # Sets the power variation restrictions\n # Default values: 15 minutes to full power\n # Used only for non-clocked heat pump\n self.max_heating_power_var = self.max_heating_power * self.my_simulation_parameters.seconds_per_timestep / 900\n self.max_cooling_power_var = - self.max_heating_power * self.my_simulation_parameters.seconds_per_timestep / 900\n\n # Sets the time operation restricitions\n self.min_operation_time = min_operation_time / self.my_simulation_parameters.seconds_per_timestep\n self.min_idle_time = min_idle_time / self.my_simulation_parameters.seconds_per_timestep\n\n # Writes info to report\n self.write_to_report()\n\n # Applies correction due to timestep\n self.set_time_correction()\n #self.set_time_correction(self.time_correction_factor)\n\n def set_time_correction(self, factor=1):\n if factor == 1:\n self.HasBeenConverted = False\n if self.HasBeenConverted is True:\n raise Exception(\"It has been already converted!\")\n self.max_heating_power *= factor\n self.max_cooling_power *= factor\n self.max_heating_power_var *= factor\n self.max_cooling_power_var *= factor\n if factor != 1:\n self.HasBeenConverted = True\n\n\n def cal_cop(self, t_out):\n return self.cop_coef[0] * t_out + self.cop_coef[1]\n\n def i_save_state(self):\n self.previous_state = self.state.clone()\n self.number_of_cycles_previous = self.number_of_cycles\n\n def i_restore_state(self):\n self.state = self.previous_state.clone() # copy.deepcopy(self.previous_state)\n self.number_of_cycles = self.number_of_cycles_previous\n\n def i_doublecheck(self, timestep: int, stsv: cp.SingleTimeStepValues):\n pass\n\n def write_to_report(self):\n lines = []\n lines.append(\"Name: {}\".format(\"Heat Pump\"))\n lines.append(\"Max power: {:4.0f} kW\".format((self.max_heating_power)*1E-3))\n lines.append(\"Max power var: {:4.0f}\".format(self.max_heating_power_var))\n #lines = []\n #lines.append([self.ComponentName,\"\"])\n #lines.append([\"Max power:\",\"{:4.2f}\".format(self.max_heating_power)])\n #lines.append([\"Max power var:\",\"{:4.2f}\".format(self.max_heating_power_var)])\n return lines\n\n def i_simulate(self, timestep: int, stsv: cp.SingleTimeStepValues, force_convergence: bool):\n # Inputs\n stateC = stsv.get_input_value(self.stateC)\n t_out = stsv.get_input_value(self.t_outC)\n #log.information(\"State: {}, Temperature: {}\".format(stateC, t_out))\n #log.information(\"State of Activation: {}\".format(self.state.activation))\n #log.information(\"Timestep special: {}\".format(self.state.start_timestep + 
self.min_idle_time))\n # Calculation\n\n ## Calculation.ThermalEnergyStorage\n ## ToDo: Implementation with Thermal Energy Storage - BEGIN\n #if self.water_loadC.SourceOutput is not None:\n # if stsv.get_input_value(self.water_loadC) != 0:\n # control_signal = 1\n # else:\n # control_signal = 0\n # # Inputs\n # water_input_mass_sec = stsv.get_input_value(self.water_input_mass)\n # water_input_mass = water_input_mass_sec\n # water_input_temperature = stsv.get_input_value(self.water_input_temperature)\n\n # mass_flow_max = self.max_heating_power / (4180 * 25) # kg/s ## -> ~0.07\n\n # if control_signal == 1 and (water_input_mass == 0 and water_input_temperature == 0):\n # \"\"\"first iteration\"\"\"\n # water_input_temperature = 40\n # water_input_mass = mass_flow_max\n\n # if control_signal == 1:\n # volume_flow_gasheater = water_input_mass / PhysicsConfig.water_density\n # ws = WaterSlice(WarmWaterStorageConfig.tank_diameter, (4 * volume_flow_gasheater) / (pi * WarmWaterStorageConfig.tank_diameter ** 2), water_input_temperature)\n # ws_output, wasted_energy_max_temperature, thermal_output = self.process_thermal(ws)\n # else:\n # height_flow_gasheater = 0\n # water_input_temperature = 0\n # ws = WaterSlice(WarmWaterStorageConfig.tank_diameter, height_flow_gasheater, water_input_temperature)\n # ws_output = ws\n # wasted_energy_max_temperature = 0\n\n # ws_output_mass = ws_output.mass\n # ws_output_temperature = ws_output.temperature\n\n # # Mass is consistent\n # stsv.set_output_value(self.water_output_mass, ws_output_mass)\n # stsv.set_output_value(self.water_output_temperature, ws_output_temperature)\n # stsv.set_output_value(self.wasted_energy_max_temperature, wasted_energy_max_temperature)\n ## ToDo: Implementation with Thermal Energy Storage - END\n\n\n ## Calculation.ThermalEnergyDelivery\n ### Heat Pump is on\n if self.state.activation != 0:\n number_of_cycles = self.state.cycle_number\n # Checks if the minimum running time has been reached\n if timestep >= self.state.start_timestep + self.min_operation_time and stateC == 0:\n self.state = HeatPumpState(start_timestep=timestep,\n cycle_number=number_of_cycles)\n\n stsv.set_output_value(self.thermal_energy_deliveredC, self.state.thermal_energy_delivered)\n stsv.set_output_value(self.heatingC, self.state.heating)\n stsv.set_output_value(self.coolingC, self.state.cooling)\n stsv.set_output_value(self.electricity_outputC, self.state.electricity_in)\n stsv.set_output_value(self.number_of_cyclesC, self.number_of_cycles)\n return\n\n ### Heat Pump is Off\n if stateC != 0 and (timestep >= self.state.start_timestep + self.min_idle_time):\n self.number_of_cycles = self.number_of_cycles + 1\n number_of_cycles = self.number_of_cycles\n if stateC == 1:\n #if stsv.get_input_value(self.stateC) > 0:\n self.state = HeatPumpState(start_timestep=timestep,\n thermal_energy_delivered = self.max_heating_power,\n cop=self.cal_cop(t_out),\n cycle_number=number_of_cycles)\n else:\n self.state = HeatPumpState(start_timestep=timestep,\n thermal_energy_delivered = self.max_cooling_power,\n cop=self.cal_cop(t_out),\n cycle_number = number_of_cycles)\n\n #log.information(self.state.thermal_energy_delivered)\n # Outputs\n stsv.set_output_value(self.thermal_energy_deliveredC, self.state.thermal_energy_delivered)\n stsv.set_output_value(self.heatingC, self.state.heating)\n stsv.set_output_value(self.coolingC, self.state.cooling)\n stsv.set_output_value(self.electricity_outputC, self.state.electricity_in)\n stsv.set_output_value(self.number_of_cyclesC, 
self.number_of_cycles)\n\n def process_thermal(self, ws_in):\n pass\n #temperature_max = 55\n #heat_capacity = PhysicsConfig.water_specific_heat_capacity\n #thermal_energy_to_add = self.max_heating_power\n #ws_out_mass = ws_in.mass\n #try:\n # ws_out_temperature = ws_in.temperature + thermal_energy_to_add / (heat_capacity * ws_out_mass)\n #except ZeroDivisionError:\n # log.information(heat_capacity)\n # log.information(ws_out_mass)\n # log.information(ws_in.mass)\n # raise ValueError\n #wasted_energy = 0\n #if ws_out_temperature > temperature_max:\n # delta_T = ws_out_temperature - temperature_max\n # wasted_energy = (delta_T * ws_out_mass * PhysicsConfig.water_specific_heat_capacity)\n # ws_out_temperature = temperature_max\n #ws_out_enthalpy = ws_in.enthalpy + thermal_energy_to_add\n #ws_in.change_slice_parameters(new_temperature=ws_out_temperature, new_enthalpy=ws_out_enthalpy, new_mass=ws_out_mass)\n #return ws_in, wasted_energy, thermal_energy_to_add\n\n\nclass HeatPumpController(cp.Component):\n \"\"\"\n Heat Pump Controller. It takes data from other\n components and sends signal to the heat pump for\n activation or deactivation.\n\n Parameters\n --------------\n t_air_heating: float\n Minimum comfortable temperature for residents\n t_air_cooling: float\n Maximum comfortable temperature for residents\n offset: float\n Temperature offset to compensate the hysteresis\n correction for the building temperature change\n mode : int\n Mode index for operation type for this heat pump\n \"\"\"\n # Inputs\n TemperatureMean = \"Residence Temperature\"\n ElectricityInput = \"ElectricityInput\"\n\n # Outputs\n State = \"State\"\n\n # Similar components to connect to:\n # 1. Building\n @utils.measure_execution_time\n def __init__(self,\n my_simulation_parameters: SimulationParameters,\n t_air_heating: float = 18.0,\n t_air_cooling: float = 26.0,\n offset: float = 0.0,\n mode=1):\n super().__init__(\"HeatPumpController\", my_simulation_parameters=my_simulation_parameters)\n self.build(t_air_cooling=t_air_cooling,\n t_air_heating=t_air_heating,\n offset=offset,\n mode=mode)\n\n self.t_mC: cp.ComponentInput = self.add_input(self.ComponentName,\n self.TemperatureMean,\n LoadTypes.Temperature,\n Units.Celsius,\n True)\n self.electricity_inputC : cp.ComponentInput= self.add_input(self.ComponentName,\n self.ElectricityInput,\n LoadTypes.Electricity,\n Units.Watt,\n False)\n self.stateC: cp.ComponentOutput = self.add_output(self.ComponentName,\n self.State,\n LoadTypes.Any,\n Units.Any)\n \n self.add_default_connections( Building, self.get_building_default_connections( ) )\n \n def get_building_default_connections( self ):\n log.information(\"setting building default connections in Heatpumpcontroller\")\n connections = [ ]\n building_classname = Building.get_classname( )\n connections.append( cp.ComponentConnection( HeatPumpController.TemperatureMean, building_classname, Building.TemperatureMean ) )\n return connections\n\n def build(self, t_air_heating, t_air_cooling, offset, mode):\n # Sth\n self.controller_heatpumpmode = \"off\"\n self.previous_heatpump_mode = self.controller_heatpumpmode\n\n # Configuration\n self.t_set_heating = t_air_heating\n self.t_set_cooling = t_air_cooling\n self.offset = offset\n\n self.mode = mode\n\n def i_save_state(self):\n self.previous_heatpump_mode = self.controller_heatpumpmode\n\n def i_restore_state(self):\n self.controller_heatpumpmode = self.previous_heatpump_mode\n\n def i_doublecheck(self, timestep: int, stsv: cp.SingleTimeStepValues):\n pass\n\n def 
i_simulate(self, timestep: int, stsv: cp.SingleTimeStepValues, force_convergence: bool):\n # check demand, and change state of self.has_heating_demand, and self._has_cooling_demand\n if force_convergence:\n pass\n else:\n # Retrieves inputs\n t_m_old = stsv.get_input_value(self.t_mC)\n electricity_input = stsv.get_input_value(self.electricity_inputC)\n\n if self.mode == 1:\n self.conditions(t_m_old)\n elif self.mode == 2:\n self.smart_conditions(t_m_old, electricity_input)\n\n if self.controller_heatpumpmode == 'heating':\n state = 1\n if self.controller_heatpumpmode == 'cooling':\n state = -1\n if self.controller_heatpumpmode == 'off':\n state = 0\n stsv.set_output_value(self.stateC, state)\n\n def conditions(self, set_temp):\n maximum_heating_set_temp = self.t_set_heating + self.offset\n minimum_heating_set_temp = self.t_set_heating\n minimum_cooling_set_temp = self.t_set_cooling - self.offset\n maximum_cooling_set_temp = self.t_set_cooling\n\n if self.controller_heatpumpmode == 'heating': # and daily_avg_temp < 15:\n if set_temp > maximum_heating_set_temp: # 23\n self.controller_heatpumpmode = 'off'\n return\n if self.controller_heatpumpmode == 'cooling':\n if set_temp < minimum_cooling_set_temp: # 24\n self.controller_heatpumpmode = 'off'\n return\n if self.controller_heatpumpmode == 'off':\n #if pvs_surplus > ? and air_temp < minimum_heating_air + 2:\n if set_temp < minimum_heating_set_temp: # 21\n self.controller_heatpumpmode = 'heating'\n return\n if set_temp > maximum_cooling_set_temp: # 26\n self.controller_heatpumpmode = 'cooling'\n return\n\n def smart_conditions(self, set_temp, electricity_input):\n smart_offset_upper = 3\n smart_offset_lower = 0.5\n maximum_heating_set_temp = self.t_set_heating + self.offset\n if electricity_input < 0:\n maximum_heating_set_temp += smart_offset_upper\n # maximum_heating_set_temp = self.t_set_heating\n minimum_heating_set_temp = self.t_set_heating\n if electricity_input < 0:\n minimum_heating_set_temp += smart_offset_lower\n minimum_cooling_set_temp = self.t_set_cooling - self.offset\n # minimum_cooling_set_temp = self.t_set_cooling\n maximum_cooling_set_temp = self.t_set_cooling\n\n if self.controller_heatpumpmode == 'heating': # and daily_avg_temp < 15:\n if set_temp > maximum_heating_set_temp: # 23\n self.controller_heatpumpmode = 'off'\n return\n if self.controller_heatpumpmode == 'cooling':\n if set_temp < minimum_cooling_set_temp: # 24\n self.controller_heatpumpmode = 'off'\n return\n if self.controller_heatpumpmode == 'off':\n # if pvs_surplus > ? and air_temp < minimum_heating_air + 2:\n if set_temp < minimum_heating_set_temp: # 21\n self.controller_heatpumpmode = 'heating'\n return\n if set_temp > maximum_cooling_set_temp: # 26\n self.controller_heatpumpmode = 'cooling'\n return\n\n #if timestep >= 60*24*30*3 and timestep <= 60*24*30*9:\n # state = 0\n\n #log.information(\"Final state: {}\\n\".format(state))\n\n def prin1t_outpu1t(self, t_m, state):\n log.information(\"==========================================\")\n log.information(\"T m: {}\".format(t_m))\n log.information(\"State: {}\".format(state))\n\n" ]
[ [ "numpy.polyfit", "matplotlib.rc" ] ]
techiaith/docker-wav2vec2-xlsr-ft-cy
[ "4f7656e232ec890b877caf66596bf0359c63b3f5" ]
[ "train/python/train_kenlm.py" ]
[ "import os\nimport io\nimport sys\nimport json\nimport yaml\nimport shlex\nimport subprocess\n\nimport torch\nimport torchaudio\nimport optuna\nimport text_preprocess\n\nfrom pathlib import Path\nfrom ctcdecode import CTCBeamDecoder\nfrom datasets import load_dataset, load_metric, set_caching_enabled\nfrom transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\n\nfrom argparse import ArgumentParser, RawTextHelpFormatter\n\n\nDESCRIPTION = \"\"\"\n\nTrain and optimize a KenLM language model from HuggingFace's provision of the Welsh corpus by the OSCAR project.\n\n\"\"\"\n\nset_caching_enabled(False)\n\n\n# Preprocessing the datasets.\ndef speech_file_to_array_fn(batch):\n batch[\"sentence\"] = text_preprocess.cleanup(batch[\"sentence\"]).strip() # + \" \"\n speech_array, sampling_rate = torchaudio.load(batch[\"path\"])\n batch[\"speech\"] = resampler(speech_array).squeeze().numpy()\n return batch\n\n\ndef decode(batch):\n inputs = processor(batch[\"speech\"], sampling_rate=16_000, return_tensors=\"pt\", padding=True) \n with torch.no_grad():\n logits = model(inputs.input_values.to(\"cuda\"), attention_mask=inputs.attention_mask.to(\"cuda\")).logits\n\n beam_results, beam_scores, timesteps, out_lens = ctcdecoder.decode(logits)\n batch[\"pred_strings_with_lm\"] = \"\".join(vocab[n] for n in beam_results[0][0][:out_lens[0][0]]).strip()\n\n return batch\n\n\ndef optimize_lm_objective(trial): \n global ctcdecoder\n \n alpha = trial.suggest_uniform('lm_alpha', 0, 6)\n beta = trial.suggest_uniform('lm_beta',0, 5)\n\n try:\n binarylm_file_path=os.path.join(lm_model_dir, \"lm.binary\")\n ctcdecoder = CTCBeamDecoder(vocab, \n model_path=binarylm_file_path,\n alpha=alpha,\n beta=beta,\n cutoff_top_n=40,\n cutoff_prob=1.0,\n beam_width=100,\n num_processes=4,\n blank_id=processor.tokenizer.pad_token_id,\n log_probs_input=True\n )\n result = test_dataset.map(decode)\n result_wer = wer.compute(predictions=result[\"pred_strings_with_lm\"], references=result[\"sentence\"])\n trial.report(result_wer, step=0)\n\n except Exception as e:\n print (e)\n raise\n\n finally:\n return result_wer \n\n\n\ndef train(lm_dir, oscar_dataset_name):\n\n Path(lm_dir).mkdir(parents=True, exist_ok=True) \n corpus_file_path = os.path.join(lm_dir, \"corpus.txt\")\n\n print (\"\\nLoading OSCAR {} dataset...\".format(oscar_dataset_name))\n oscar_corpus = load_dataset(\"oscar\", oscar_dataset_name)\n\n print (\"\\nExporting OSCAR to text file {}...\".format(corpus_file_path))\n with open(corpus_file_path, 'w', encoding='utf-8') as corpus_file:\n for line in oscar_corpus[\"train\"]:\n t = text_preprocess.cleanup(line[\"text\"])\n corpus_file.write(t)\n\n # generate KenLM ARPA file language model\n lm_arpa_file_path=os.path.join(lm_dir, \"lm.arpa\")\n lm_bin_file_path=os.path.join(lm_dir, \"lm.binary\")\n\n cmd = \"lmplz -o {n} --text {corpus_file} --arpa {lm_file}\".format(n=5, corpus_file=corpus_file_path, lm_file=lm_arpa_file_path)\n print (cmd)\n\n subprocess.run(shlex.split(cmd), stderr=sys.stderr, stdout=sys.stdout)\n\n # generate binary version\n cmd = \"build_binary trie {arpa_file} {bin_file}\".format(arpa_file=lm_arpa_file_path, bin_file=lm_bin_file_path)\n print (cmd)\n\n subprocess.run(shlex.split(cmd), stderr=sys.stderr, stdout=sys.stdout)\n\n #\n os.remove(corpus_file_path)\n os.remove(lm_arpa_file_path)\n\n return lm_dir\n\n\n\ndef optimize(lm_dir, wav2vec_model_path):\n global processor\n global model\n global vocab\n global wer\n global resampler\n global test_dataset\n global lm_model_dir\n\n 
lm_model_dir=lm_dir\n\n test_dataset = load_dataset(\"custom_common_voice.py\", \"cy\", split=\"test\")\n #test_dataset = load_dataset(\"common_voice\", \"cy\", split=\"test\")\n\n wer = load_metric(\"wer\")\n\n processor = Wav2Vec2Processor.from_pretrained(wav2vec_model_path)\n model = Wav2Vec2ForCTC.from_pretrained(wav2vec_model_path)\n\n model.to(\"cuda\")\n\n resampler = torchaudio.transforms.Resample(48_000, 16_000)\n\n vocab=processor.tokenizer.convert_ids_to_tokens(range(0, processor.tokenizer.vocab_size))\n space_ix = vocab.index('|')\n vocab[space_ix]=' '\n\n print (\"Preprocessing speech files\")\n test_dataset = test_dataset.map(speech_file_to_array_fn)\n\n\n print (\"Beginning alpha and beta hyperparameter optimization\")\n study = optuna.create_study()\n study.optimize(optimize_lm_objective, n_jobs=1, n_trials=100)\n\n #\n lm_best = {'alpha':study.best_params['lm_alpha'], 'beta':study.best_params['lm_beta']}\n\n config_file_path = os.path.join(lm_model_dir, \"config_ctc.yaml\")\n with open (config_file_path, 'w') as config_file:\n yaml.dump(lm_best, config_file)\n\n print('Best params saved to config file {}: alpha={}, beta={} with WER={}'.format(config_file_path, study.best_params['lm_alpha'], study.best_params['lm_beta'], study.best_value))\n\n\n\ndef main(lm_root_dir, wav2vec2_model_path, **args):\n lm_file_path=train_kenlm(lm_root_dir, \"unshuffled_deduplicated_cy\")\n optimize_kenlm(lm_file_path, wav2vec2_model_path) \n\n\n\nif __name__ == \"__main__\":\n\n parser = ArgumentParser(description=DESCRIPTION, formatter_class=RawTextHelpFormatter) \n\n parser.add_argument(\"--target_dir\", dest=\"lm_root_dir\", required=True, help=\"target directory for language model\")\n parser.add_argument(\"--model\", dest=\"wav2vec_model_path\", required=True, help=\"acoustic model to be used for optimizing\")\n \n parser.set_defaults(func=main)\n args = parser.parse_args()\n args.func(**vars(args))\n\n" ]
[ [ "torch.no_grad" ] ]
sappelhoff/mne-python
[ "3d4a4f66232448de542ac076df7b57115644978a" ]
[ "mne/tests/test_epochs.py" ]
[ "# -*- coding: utf-8 -*-\n# Author: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD-3-Clause\n\nfrom copy import deepcopy\nfrom functools import partial\nfrom io import BytesIO\nimport os\nimport os.path as op\nimport pickle\n\nimport pytest\nfrom numpy.testing import (assert_array_equal, assert_array_almost_equal,\n assert_allclose, assert_equal, assert_array_less)\nimport numpy as np\nfrom numpy.fft import rfft, rfftfreq\nimport matplotlib.pyplot as plt\nimport scipy.signal\n\nimport mne\nfrom mne import (Epochs, Annotations, read_events, pick_events, read_epochs,\n equalize_channels, pick_types, pick_channels, read_evokeds,\n write_evokeds, create_info, make_fixed_length_events,\n make_fixed_length_epochs, combine_evoked)\nfrom mne.baseline import rescale\nfrom mne.datasets import testing\nfrom mne.chpi import read_head_pos, head_pos_to_trans_rot_t\nfrom mne.event import merge_events\nfrom mne.io import RawArray, read_raw_fif\nfrom mne.io.constants import FIFF\nfrom mne.io.proj import _has_eeg_average_ref_proj\nfrom mne.io.write import write_int, INT32_MAX, _get_split_size\nfrom mne.preprocessing import maxwell_filter\nfrom mne.epochs import (\n bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,\n EpochsArray, concatenate_epochs, BaseEpochs, average_movements,\n _handle_event_repeated, make_metadata)\nfrom mne.utils import (requires_pandas, object_diff, use_log_level,\n catch_logging, _FakeNoPandas,\n assert_meg_snr, check_version, _dt_to_stamp)\n\ndata_path = testing.data_path(download=False)\nfname_raw_testing = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc_raw.fif')\nfname_raw_move = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')\nfname_raw_movecomp_sss = op.join(\n data_path, 'SSS', 'test_move_anon_movecomp_raw_sss.fif')\nfname_raw_move_pos = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')\n\nbase_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nevent_name = op.join(base_dir, 'test-eve.fif')\nevoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')\n\nevent_id, tmin, tmax = 1, -0.2, 0.5\nevent_id_2 = np.int64(2) # to test non Python int types\nrng = np.random.RandomState(42)\n\n\ndef test_event_repeated():\n \"\"\"Test epochs takes into account repeated events.\"\"\"\n n_samples = 100\n n_channels = 2\n ch_names = ['chan%i' % i for i in range(n_channels)]\n info = mne.create_info(ch_names=ch_names, sfreq=1000.)\n data = np.zeros((n_channels, n_samples))\n raw = mne.io.RawArray(data, info)\n\n events = np.array([[10, 0, 1], [10, 0, 2]])\n epochs = mne.Epochs(raw, events, event_repeated='drop')\n assert epochs.drop_log == ((), ('DROP DUPLICATE',))\n assert_array_equal(epochs.selection, [0])\n epochs = mne.Epochs(raw, events, event_repeated='merge')\n assert epochs.drop_log == ((), ('MERGE DUPLICATE',))\n assert_array_equal(epochs.selection, [0])\n\n\ndef test_handle_event_repeated():\n \"\"\"Test handling of repeated events.\"\"\"\n # A general test case\n EVENT_ID = {'aud': 1, 'vis': 2, 'foo': 3}\n EVENTS = np.array([[0, 0, 1], [0, 0, 2],\n [3, 0, 2], [3, 0, 1],\n [5, 0, 2], [5, 0, 1], [5, 0, 3],\n [7, 0, 1]])\n SELECTION = np.arange(len(EVENTS))\n DROP_LOG = ((),) * len(EVENTS)\n with pytest.raises(RuntimeError, match='Event time samples were not uniq'):\n _handle_event_repeated(EVENTS, EVENT_ID, event_repeated='error',\n selection=SELECTION,\n drop_log=DROP_LOG)\n\n events, event_id, 
selection, drop_log = _handle_event_repeated(\n EVENTS, EVENT_ID, 'drop', SELECTION, DROP_LOG)\n assert_array_equal(events, [[0, 0, 1], [3, 0, 2], [5, 0, 2], [7, 0, 1]])\n assert_array_equal(events, EVENTS[selection])\n unselection = np.setdiff1d(SELECTION, selection)\n assert all(drop_log[k] == ('DROP DUPLICATE',) for k in unselection)\n assert event_id == {'aud': 1, 'vis': 2}\n\n events, event_id, selection, drop_log = _handle_event_repeated(\n EVENTS, EVENT_ID, 'merge', SELECTION, DROP_LOG)\n assert_array_equal(events[0][-1], events[1][-1])\n assert_array_equal(events, [[0, 0, 4], [3, 0, 4], [5, 0, 5], [7, 0, 1]])\n assert_array_equal(events[:, :2], EVENTS[selection][:, :2])\n unselection = np.setdiff1d(SELECTION, selection)\n assert all(drop_log[k] == ('MERGE DUPLICATE',) for k in unselection)\n assert set(event_id.keys()) == set(['aud', 'aud/vis', 'aud/foo/vis'])\n assert event_id['aud/vis'] == 4\n\n # Test early return with no changes: no error for wrong event_repeated arg\n fine_events = np.array([[0, 0, 1], [1, 0, 2]])\n events, event_id, selection, drop_log = _handle_event_repeated(\n fine_events, EVENT_ID, 'no', [0, 2], DROP_LOG)\n assert event_id == EVENT_ID\n assert_array_equal(selection, [0, 2])\n assert drop_log == DROP_LOG\n assert_array_equal(events, fine_events)\n del fine_events\n\n # Test falling back on 0 for heterogeneous \"prior-to-event\" codes\n # order of third column does not determine new event_id key, we always\n # take components, sort, and join on \"/\"\n # should make new event_id value: 5 (because 1,2,3,4 are taken)\n heterogeneous_events = np.array([[0, 3, 2], [0, 4, 1]])\n events, event_id, selection, drop_log = _handle_event_repeated(\n heterogeneous_events, EVENT_ID, 'merge', [0, 1], deepcopy(DROP_LOG))\n assert set(event_id.keys()) == set(['aud/vis'])\n assert event_id['aud/vis'] == 5\n assert_array_equal(selection, [0])\n assert drop_log[1] == ('MERGE DUPLICATE',)\n assert_array_equal(events, np.array([[0, 0, 5], ]))\n del heterogeneous_events\n\n # Test keeping a homogeneous \"prior-to-event\" code (=events[:, 1])\n homogeneous_events = np.array([[0, 99, 1], [0, 99, 2],\n [1, 0, 1], [2, 0, 2]])\n events, event_id, selection, drop_log = _handle_event_repeated(\n homogeneous_events, EVENT_ID, 'merge', [1, 3, 4, 7],\n deepcopy(DROP_LOG))\n assert set(event_id.keys()) == set(['aud', 'vis', 'aud/vis'])\n assert_array_equal(events, np.array([[0, 99, 4], [1, 0, 1], [2, 0, 2]]))\n assert_array_equal(selection, [1, 4, 7])\n assert drop_log[3] == ('MERGE DUPLICATE',)\n del homogeneous_events\n\n # Test dropping instead of merging, if event_codes to be merged are equal\n equal_events = np.array([[0, 0, 1], [0, 0, 1]])\n events, event_id, selection, drop_log = _handle_event_repeated(\n equal_events, EVENT_ID, 'merge', [3, 5], deepcopy(DROP_LOG))\n assert_array_equal(events, np.array([[0, 0, 1], ]))\n assert_array_equal(selection, [3])\n assert drop_log[5] == ('MERGE DUPLICATE',)\n assert set(event_id.keys()) == set(['aud'])\n\n # new numbers\n for vals, want in (((1, 3), 2), ((2, 3), 1), ((1, 2), 3)):\n events = np.zeros((2, 3), int)\n events[:, 2] = vals\n event_id = {str(v): v for v in events[:, 2]}\n selection = np.arange(len(events))\n drop_log = [tuple() for _ in range(len(events))]\n events, event_id, selection, drop_log = _handle_event_repeated(\n events, event_id, 'merge', selection, drop_log)\n want = np.array([[0, 0, want]])\n assert_array_equal(events, want)\n\n\ndef _get_data(preload=False):\n \"\"\"Get data.\"\"\"\n raw = read_raw_fif(raw_fname, 
preload=preload, verbose='warning')\n events = read_events(event_name)\n picks = pick_types(raw.info, meg=True, eeg=True, stim=True,\n ecg=True, eog=True, include=['STI 014'],\n exclude='bads')\n return raw, events, picks\n\n\nreject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)\nflat = dict(grad=1e-15, mag=1e-15)\n\n\ndef test_get_data():\n \"\"\"Test the .get_data() method.\"\"\"\n raw, events, picks = _get_data()\n event_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}\n epochs = Epochs(raw, events, event_id, preload=True)\n\n # Testing with respect to units param\n # more tests in mne/io/tests/test_raw.py::test_get_data_units\n # EEG is already in V, so no conversion should take place\n d1 = epochs.get_data(picks=\"eeg\", units=None)\n d2 = epochs.get_data(picks=\"eeg\", units=\"V\")\n assert_array_equal(d1, d2)\n\n with pytest.raises(ValueError, match=\"is not a valid unit for eeg\"):\n epochs.get_data(picks=\"eeg\", units=\"\")\n\n with pytest.raises(ValueError, match=\"cannot be str if there is more\"):\n epochs.get_data(picks=[\"eeg\", \"meg\"], units=\"V\")\n\n # Check combination of units with item param, scale only one ch_type\n d3 = epochs.get_data(item=[1, 2, 3], units={\"grad\": \"fT/cm\"})\n assert d3.shape[0] == 3\n\n grad_idxs = np.array([i == \"grad\" for i in epochs.get_channel_types()])\n eeg_idxs = np.array([i == \"eeg\" for i in epochs.get_channel_types()])\n assert_array_equal(\n d3[:, grad_idxs, :],\n epochs.get_data(\"grad\", item=[1, 2, 3]) * 1e13 # T/m to fT/cm\n )\n assert_array_equal(\n d3[:, eeg_idxs, :],\n epochs.get_data(\"eeg\", item=[1, 2, 3])\n )\n\n # Test tmin/tmax\n data = epochs.get_data(tmin=0)\n assert np.all(data.shape[-1] ==\n epochs._data.shape[-1] -\n np.nonzero(epochs.times == 0)[0])\n\n assert epochs.get_data(tmin=0, tmax=0).size == 0\n\n with pytest.raises(TypeError, match='tmin .* float, None'):\n epochs.get_data(tmin=[1], tmax=1)\n\n with pytest.raises(TypeError, match='tmax .* float, None'):\n epochs.get_data(tmin=1, tmax=np.ones(5))\n\n\ndef test_hierarchical():\n \"\"\"Test hierarchical access.\"\"\"\n raw, events, picks = _get_data()\n event_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}\n epochs = Epochs(raw, events, event_id, preload=True)\n epochs_a1 = epochs['a/1']\n epochs_a2 = epochs['a/2']\n epochs_b1 = epochs['b/1']\n epochs_b2 = epochs['b/2']\n epochs_a = epochs['a']\n assert_equal(len(epochs_a), len(epochs_a1) + len(epochs_a2))\n epochs_b = epochs['b']\n assert_equal(len(epochs_b), len(epochs_b1) + len(epochs_b2))\n epochs_1 = epochs['1']\n assert_equal(len(epochs_1), len(epochs_a1) + len(epochs_b1))\n epochs_2 = epochs['2']\n assert_equal(len(epochs_2), len(epochs_a2) + len(epochs_b2))\n epochs_all = epochs[('1', '2')]\n assert_equal(len(epochs), len(epochs_all))\n assert_array_equal(epochs.get_data(), epochs_all.get_data())\n\n\[email protected]\[email protected]_testing_data\ndef test_average_movements():\n \"\"\"Test movement averaging algorithm.\"\"\"\n # usable data\n crop = 0., 10.\n origin = (0., 0., 0.04)\n raw = read_raw_fif(fname_raw_move, allow_maxshield='yes')\n raw.info['bads'] += ['MEG2443'] # mark some bad MEG channel\n raw.crop(*crop).load_data()\n raw.filter(None, 20, fir_design='firwin')\n events = make_fixed_length_events(raw, event_id)\n picks = pick_types(raw.info, meg=True, eeg=True, stim=True,\n ecg=True, eog=True, exclude=())\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, proj=False,\n preload=True)\n epochs_proj = Epochs(raw, events[:1], event_id, tmin, tmax, picks=picks,\n 
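# Illustrative sketch (not part of this test module): hierarchical
# '/'-separated event_id keys and tag-based selection, the behaviour checked
# by test_hierarchical above. Names, samples and codes are arbitrary.
import numpy as np
import mne

demo_info = mne.create_info(['ch0'], sfreq=100., ch_types='eeg')
demo_raw = mne.io.RawArray(np.zeros((1, 2000)), demo_info)
demo_events = np.array([[200, 0, 1], [600, 0, 2], [1000, 0, 3], [1400, 0, 4]])
demo_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}
demo_epochs = mne.Epochs(demo_raw, demo_events, demo_id,
                         tmin=-0.1, tmax=0.3, preload=True)

assert len(demo_epochs['a']) == len(demo_epochs['a/1']) + len(demo_epochs['a/2'])
assert len(demo_epochs['1']) == len(demo_epochs['a/1']) + len(demo_epochs['b/1'])
demo_sub = demo_epochs[['a/1', 'b/2']]  # a list of keys selects their union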
proj=True, preload=True)\n raw_sss_stat = maxwell_filter(raw, origin=origin, regularize=None,\n bad_condition='ignore')\n del raw\n epochs_sss_stat = Epochs(raw_sss_stat, events, event_id, tmin, tmax,\n picks=picks, proj=False)\n evoked_sss_stat = epochs_sss_stat.average()\n del raw_sss_stat, epochs_sss_stat\n head_pos = read_head_pos(fname_raw_move_pos)\n trans = epochs.info['dev_head_t']['trans']\n head_pos_stat = (np.array([trans[:3, 3]]),\n np.array([trans[:3, :3]]),\n np.array([0.]))\n\n # SSS-based\n pytest.raises(TypeError, average_movements, epochs, None)\n evoked_move_non = average_movements(epochs, head_pos=head_pos,\n weight_all=False, origin=origin)\n evoked_move_all = average_movements(epochs, head_pos=head_pos,\n weight_all=True, origin=origin)\n evoked_stat_all = average_movements(epochs, head_pos=head_pos_stat,\n weight_all=True, origin=origin)\n evoked_std = epochs.average()\n for ev in (evoked_move_non, evoked_move_all, evoked_stat_all):\n assert_equal(ev.nave, evoked_std.nave)\n assert_equal(len(ev.info['bads']), 0)\n # substantial changes to MEG data\n for ev in (evoked_move_non, evoked_stat_all):\n assert_meg_snr(ev, evoked_std, 0., 0.1)\n pytest.raises(AssertionError, assert_meg_snr,\n ev, evoked_std, 1., 1.)\n meg_picks = pick_types(evoked_std.info, meg=True, exclude=())\n assert_allclose(evoked_move_non.data[meg_picks],\n evoked_move_all.data[meg_picks], atol=1e-20)\n # compare to averaged movecomp version (should be fairly similar)\n raw_sss = read_raw_fif(fname_raw_movecomp_sss)\n raw_sss.crop(*crop).load_data()\n raw_sss.filter(None, 20, fir_design='firwin')\n picks_sss = pick_types(raw_sss.info, meg=True, eeg=True, stim=True,\n ecg=True, eog=True, exclude=())\n assert_array_equal(picks, picks_sss)\n epochs_sss = Epochs(raw_sss, events, event_id, tmin, tmax,\n picks=picks_sss, proj=False)\n evoked_sss = epochs_sss.average()\n assert_equal(evoked_std.nave, evoked_sss.nave)\n # this should break the non-MEG channels\n pytest.raises(AssertionError, assert_meg_snr,\n evoked_sss, evoked_move_all, 0., 0.)\n assert_meg_snr(evoked_sss, evoked_move_non, 0.02, 2.6)\n assert_meg_snr(evoked_sss, evoked_stat_all, 0.05, 3.2)\n # these should be close to numerical precision\n assert_allclose(evoked_sss_stat.data, evoked_stat_all.data, atol=1e-20)\n\n # pos[0] > epochs.events[0] uses dev_head_t, so make it equivalent\n destination = deepcopy(epochs.info['dev_head_t'])\n x = head_pos_to_trans_rot_t(head_pos[1])\n epochs.info['dev_head_t']['trans'][:3, :3] = x[1]\n epochs.info['dev_head_t']['trans'][:3, 3] = x[0]\n pytest.raises(AssertionError, assert_allclose,\n epochs.info['dev_head_t']['trans'],\n destination['trans'])\n evoked_miss = average_movements(epochs, head_pos=head_pos[2:],\n origin=origin, destination=destination)\n assert_allclose(evoked_miss.data, evoked_move_all.data,\n atol=1e-20)\n assert_allclose(evoked_miss.info['dev_head_t']['trans'],\n destination['trans'])\n\n # degenerate cases\n destination['to'] = destination['from'] # bad dest\n pytest.raises(RuntimeError, average_movements, epochs, head_pos,\n origin=origin, destination=destination)\n pytest.raises(TypeError, average_movements, 'foo', head_pos=head_pos)\n pytest.raises(RuntimeError, average_movements, epochs_proj,\n head_pos=head_pos) # prj\n\n\ndef _assert_drop_log_types(drop_log):\n __tracebackhide__ = True\n assert isinstance(drop_log, tuple), 'drop_log should be tuple'\n assert all(isinstance(log, tuple) for log in drop_log), \\\n 'drop_log[ii] should be tuple'\n assert all(isinstance(s, str) for 
log in drop_log for s in log), \\\n 'drop_log[ii][jj] should be str'\n\n\ndef test_reject():\n \"\"\"Test epochs rejection.\"\"\"\n raw, events, _ = _get_data()\n names = raw.ch_names[::5]\n assert 'MEG 2443' in names\n raw.pick(names).load_data()\n assert 'eog' in raw\n raw.info.normalize_proj()\n picks = np.arange(len(raw.ch_names))\n # cull the list just to contain the relevant event\n events = events[events[:, 2] == event_id, :]\n assert len(events) == 7\n selection = np.arange(3)\n drop_log = ((),) * 3 + (('MEG 2443',),) * 4\n _assert_drop_log_types(drop_log)\n pytest.raises(TypeError, pick_types, raw)\n picks_meg = pick_types(raw.info, meg=True, eeg=False)\n pytest.raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,\n picks=picks, preload=False, reject='foo')\n pytest.raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,\n picks=picks_meg, preload=False, reject=dict(eeg=1.))\n # this one is okay because it's not actually requesting rejection\n Epochs(raw, events, event_id, tmin, tmax, picks=picks_meg,\n preload=False, reject=dict(eeg=np.inf))\n for val in (None, -1): # protect against older MNE-C types\n for kwarg in ('reject', 'flat'):\n pytest.raises(ValueError, Epochs, raw, events, event_id,\n tmin, tmax, picks=picks_meg, preload=False,\n **{kwarg: dict(grad=val)})\n pytest.raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,\n picks=picks, preload=False, reject=dict(foo=1.))\n\n data_7 = dict()\n keep_idx = [0, 1, 2]\n for preload in (True, False):\n for proj in (True, False, 'delayed'):\n # no rejection\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n preload=preload)\n _assert_drop_log_types(epochs.drop_log)\n pytest.raises(ValueError, epochs.drop_bad, reject='foo')\n epochs.drop_bad()\n assert_equal(len(epochs), len(events))\n assert_array_equal(epochs.selection, np.arange(len(events)))\n assert epochs.drop_log == ((),) * 7\n if proj not in data_7:\n data_7[proj] = epochs.get_data()\n assert_array_equal(epochs.get_data(), data_7[proj])\n\n # with rejection\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n reject=reject, preload=preload)\n _assert_drop_log_types(epochs.drop_log)\n epochs.drop_bad()\n _assert_drop_log_types(epochs.drop_log)\n assert_equal(len(epochs), len(events) - 4)\n assert_array_equal(epochs.selection, selection)\n assert epochs.drop_log == drop_log\n assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])\n\n # rejection post-hoc\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n preload=preload)\n epochs.drop_bad()\n assert_equal(len(epochs), len(events))\n assert_array_equal(epochs.get_data(), data_7[proj])\n epochs.drop_bad(reject)\n assert_equal(len(epochs), len(events) - 4)\n assert_equal(len(epochs), len(epochs.get_data()))\n assert_array_equal(epochs.selection, selection)\n assert epochs.drop_log == drop_log\n assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])\n\n # rejection twice\n reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n reject=reject_part, preload=preload)\n epochs.drop_bad()\n assert_equal(len(epochs), len(events) - 1)\n epochs.drop_bad(reject)\n assert_equal(len(epochs), len(events) - 4)\n assert_array_equal(epochs.selection, selection)\n assert epochs.drop_log == drop_log\n assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])\n\n # ensure that thresholds must become more stringent, not less\n pytest.raises(ValueError, epochs.drop_bad, 
reject_part)\n assert_equal(len(epochs), len(events) - 4)\n assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])\n epochs.drop_bad(flat=dict(mag=1.))\n assert_equal(len(epochs), 0)\n pytest.raises(ValueError, epochs.drop_bad,\n flat=dict(mag=0.))\n\n # rejection of subset of trials (ensure array ownership)\n reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n reject=None, preload=preload)\n epochs = epochs[:-1]\n epochs.drop_bad(reject=reject)\n assert_equal(len(epochs), len(events) - 4)\n assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])\n\n # rejection on annotations\n sfreq = raw.info['sfreq']\n onsets = [(event[0] - raw.first_samp) / sfreq for event in\n events[::2][:3]]\n onsets[0] = onsets[0] + tmin - 0.499 # tmin < 0\n onsets[1] = onsets[1] + tmax - 0.001\n stamp = _dt_to_stamp(raw.info['meas_date'])\n first_time = (stamp[0] + stamp[1] * 1e-6 + raw.first_samp / sfreq)\n for orig_time in [None, first_time]:\n annot = Annotations(onsets, [0.5, 0.5, 0.5], 'BAD', orig_time)\n raw.set_annotations(annot)\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=[0],\n reject=None, preload=preload)\n epochs.drop_bad()\n assert_equal(len(events) - 3, len(epochs.events))\n assert_equal(epochs.drop_log[0][0], 'BAD')\n assert_equal(epochs.drop_log[2][0], 'BAD')\n assert_equal(epochs.drop_log[4][0], 'BAD')\n raw.set_annotations(None)\n\n\ndef test_reject_by_annotations_reject_tmin_reject_tmax():\n \"\"\"Test reject_by_annotations with reject_tmin and reject_tmax defined.\"\"\"\n # 10 seconds of data, event at 2s, bad segment from 1s to 1.5s\n info = mne.create_info(ch_names=['test_a'], sfreq=1000, ch_types='eeg')\n raw = mne.io.RawArray(np.atleast_2d(np.arange(0, 10, 1 / 1000)), info=info)\n events = np.array([[2000, 0, 1]])\n raw.set_annotations(mne.Annotations(1, 0.5, 'BAD'))\n\n # Make the epoch based on the event at 2s, so from 1s to 3s ... 
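# Illustrative sketch (not part of this test module): peak-to-peak rejection
# with ``reject`` (upper bounds) and ``flat`` (lower bounds) dicts, and the
# rule checked above that later drop_bad() calls may only tighten the
# thresholds. The synthetic recording and all limits are arbitrary values.
import numpy as np
import mne

demo_rng = np.random.RandomState(0)
demo_info = mne.create_info(['eeg0', 'eeg1'], sfreq=100., ch_types='eeg')
demo_raw = mne.io.RawArray(demo_rng.randn(2, 3000) * 1e-6, demo_info)  # ~1 uV
demo_events = mne.make_fixed_length_events(demo_raw, id=1, duration=1.)

demo_epochs = mne.Epochs(demo_raw, demo_events, tmin=0., tmax=0.5,
                         baseline=None, preload=True,
                         reject=dict(eeg=100e-6),  # drop if p-p > 100 uV
                         flat=dict(eeg=1e-7))      # drop if p-p < 0.1 uV
demo_epochs.drop_bad(reject=dict(eeg=50e-6))  # stricter threshold: allowed
print(demo_epochs.drop_log)  # per-epoch tuple of offending channel names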
assert it\n # is rejected due to bad segment overlap from 1s to 1.5s\n epochs = mne.Epochs(raw, events, tmin=-1, tmax=1,\n preload=True, reject_by_annotation=True)\n assert len(epochs) == 0\n\n # Setting `reject_tmin` to prevent rejection of epoch.\n epochs = mne.Epochs(raw, events, tmin=-1, tmax=1, reject_tmin=-0.2,\n preload=True, reject_by_annotation=True)\n assert len(epochs) == 1\n\n # Same check but bad segment overlapping from 2.5s to 3s: use `reject_tmax`\n raw.set_annotations(mne.Annotations(2.5, 0.5, 'BAD'))\n epochs = mne.Epochs(raw, events, tmin=-1, tmax=1, reject_tmax=0.4,\n preload=True, reject_by_annotation=True)\n assert len(epochs) == 1\n\n\ndef test_own_data():\n \"\"\"Test for epochs data ownership (gh-5346).\"\"\"\n raw, events = _get_data()[:2]\n n_epochs = 10\n events = events[:n_epochs]\n epochs = mne.Epochs(raw, events, preload=True)\n assert epochs._data.flags['C_CONTIGUOUS']\n assert epochs._data.flags['OWNDATA']\n epochs.crop(tmin=-0.1, tmax=0.4)\n assert len(epochs) == epochs._data.shape[0] == len(epochs.events)\n assert len(epochs) == n_epochs\n assert not epochs._data.flags['OWNDATA']\n\n # data ownership value error\n epochs.drop_bad(flat=dict(eeg=8e-6))\n n_now = len(epochs)\n assert 5 < n_now < n_epochs\n assert len(epochs) == epochs._data.shape[0] == len(epochs.events)\n\n good_chan = epochs.copy().pick_channels([epochs.ch_names[0]])\n good_chan.rename_channels({good_chan.ch_names[0]: 'good'})\n epochs.add_channels([good_chan])\n # \"ValueError: resize only works on single-segment arrays\"\n epochs.drop_bad(flat=dict(eeg=10e-6))\n assert 1 < len(epochs) < n_now\n\n\ndef test_decim():\n \"\"\"Test epochs decimation.\"\"\"\n # First with EpochsArray\n dec_1, dec_2 = 2, 3\n decim = dec_1 * dec_2\n n_epochs, n_channels, n_times = 5, 10, 20\n sfreq = 1000.\n sfreq_new = sfreq / decim\n data = rng.randn(n_epochs, n_channels, n_times)\n events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T\n info = create_info(n_channels, sfreq, 'eeg')\n with info._unlock():\n info['lowpass'] = sfreq_new / float(decim)\n epochs = EpochsArray(data, info, events)\n data_epochs = epochs.copy().decimate(decim).get_data()\n data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()\n data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()\n assert_array_equal(data_epochs, data[:, :, ::decim])\n assert_array_equal(data_epochs_2, data[:, :, 1::decim])\n assert_array_equal(data_epochs, data_epochs_3)\n\n # Now let's do it with some real data\n raw, events, picks = _get_data()\n events = events[events[:, 2] == 1][:2]\n raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks[::30]])\n raw.info.normalize_proj()\n del picks\n sfreq_new = raw.info['sfreq'] / decim\n with raw.info._unlock():\n raw.info['lowpass'] = sfreq_new / 12. 
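# Illustrative sketch (not part of this test module): constructing epochs
# directly from a NumPy array with mne.EpochsArray, the constructor used by
# test_decim above. Shapes, sfreq and the events array are arbitrary.
import numpy as np
import mne

n_ep, n_ch, n_t = 5, 3, 200
demo_data = np.random.RandomState(0).randn(n_ep, n_ch, n_t) * 1e-6
demo_info = mne.create_info(n_ch, sfreq=1000., ch_types='eeg')
demo_events = np.c_[np.arange(n_ep) * n_t,   # sample of each event
                    np.zeros(n_ep, int),     # "prior" code (unused here)
                    np.ones(n_ep, int)]      # event code
demo_epochs = mne.EpochsArray(demo_data, demo_info, demo_events, tmin=-0.05)
assert demo_epochs.get_data().shape == (n_ep, n_ch, n_t)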
# suppress aliasing warnings\n pytest.raises(ValueError, epochs.decimate, -1)\n pytest.raises(ValueError, epochs.decimate, 2, offset=-1)\n pytest.raises(ValueError, epochs.decimate, 2, offset=2)\n for this_offset in range(decim):\n epochs = Epochs(raw, events, event_id,\n tmin=-this_offset / raw.info['sfreq'], tmax=tmax,\n baseline=None)\n idx_offsets = np.arange(decim) + this_offset\n for offset, idx_offset in zip(np.arange(decim), idx_offsets):\n expected_times = epochs.times[idx_offset::decim]\n expected_data = epochs.get_data()[:, :, idx_offset::decim]\n must_have = offset / float(epochs.info['sfreq'])\n assert (np.isclose(must_have, expected_times).any())\n ep_decim = epochs.copy().decimate(decim, offset)\n assert (np.isclose(must_have, ep_decim.times).any())\n assert_allclose(ep_decim.times, expected_times)\n assert_allclose(ep_decim.get_data(), expected_data)\n assert_equal(ep_decim.info['sfreq'], sfreq_new)\n\n # More complicated cases\n epochs = Epochs(raw, events, event_id, tmin, tmax)\n expected_data = epochs.get_data()[:, :, ::decim]\n expected_times = epochs.times[::decim]\n for preload in (True, False):\n # at init\n epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,\n preload=preload)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n\n # split between init and afterward\n epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,\n preload=preload).decimate(dec_2)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,\n preload=preload).decimate(dec_1)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n\n # split between init and afterward, with preload in between\n epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,\n preload=preload)\n epochs.load_data()\n epochs = epochs.decimate(dec_2)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,\n preload=preload)\n epochs.load_data()\n epochs = epochs.decimate(dec_1)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n\n # decimate afterward\n epochs = Epochs(raw, events, event_id, tmin, tmax,\n preload=preload).decimate(decim)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n\n # decimate afterward, with preload in between\n epochs = Epochs(raw, events, event_id, tmin, tmax, preload=preload)\n epochs.load_data()\n epochs.decimate(decim)\n assert_allclose(epochs.get_data(), expected_data)\n assert_allclose(epochs.get_data(), expected_data)\n assert_equal(epochs.info['sfreq'], sfreq_new)\n assert_array_equal(epochs.times, expected_times)\n\n # test picks when getting data\n 
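# Illustrative sketch (not part of this test module): decimation keeps every
# ``decim``-th sample and divides info['sfreq'] accordingly, as the loops in
# test_decim above verify. The lowpass bookkeeping mirrors the test; all
# numbers are arbitrary example values.
import numpy as np
import mne

demo_sfreq, demo_decim = 600., 6
demo_info = mne.create_info(2, demo_sfreq, 'eeg')
with demo_info._unlock():
    demo_info['lowpass'] = 40.  # stay below the new Nyquist (50 Hz)
demo_data = np.random.RandomState(0).randn(4, 2, 300) * 1e-6
demo_epochs = mne.EpochsArray(demo_data, demo_info)

demo_dec = demo_epochs.copy().decimate(demo_decim)
assert demo_dec.info['sfreq'] == demo_sfreq / demo_decim      # 100 Hz
assert demo_dec.get_data().shape[-1] == 300 // demo_decim     # 50 samples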
picks = [3, 4, 7]\n d1 = epochs.get_data(picks=picks)\n d2 = epochs.get_data()[:, picks]\n assert_array_equal(d1, d2)\n\n\ndef test_base_epochs():\n \"\"\"Test base epochs class.\"\"\"\n raw = _get_data()[0]\n epochs = BaseEpochs(raw.info, None, np.ones((1, 3), int),\n event_id, tmin, tmax)\n pytest.raises(NotImplementedError, epochs.get_data)\n # events have wrong dtype (float)\n with pytest.raises(TypeError, match='events should be a NumPy array'):\n BaseEpochs(raw.info, None, np.ones((1, 3), float), event_id, tmin,\n tmax)\n # events have wrong shape\n with pytest.raises(ValueError, match='events must be of shape'):\n BaseEpochs(raw.info, None, np.ones((1, 3, 2), int), event_id, tmin,\n tmax)\n # events are tuple (like returned by mne.events_from_annotations)\n with pytest.raises(TypeError, match='events should be a NumPy array'):\n BaseEpochs(raw.info, None, (np.ones((1, 3), int), {'foo': 1}))\n\n\ndef test_savgol_filter():\n \"\"\"Test savgol filtering.\"\"\"\n h_freq = 20.\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events, event_id, tmin, tmax)\n pytest.raises(RuntimeError, epochs.savgol_filter, 10.)\n epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)\n epochs.pick_types(meg='grad')\n freqs = rfftfreq(len(epochs.times), 1. / epochs.info['sfreq'])\n data = np.abs(rfft(epochs.get_data()))\n pass_mask = (freqs <= h_freq / 2. - 5.)\n stop_mask = (freqs >= h_freq * 2 + 5.)\n epochs.savgol_filter(h_freq)\n data_filt = np.abs(rfft(epochs.get_data()))\n # decent in pass-band\n assert_allclose(np.mean(data[:, :, pass_mask], 0),\n np.mean(data_filt[:, :, pass_mask], 0),\n rtol=1e-2, atol=1e-18)\n # suppression in stop-band\n assert (np.mean(data[:, :, stop_mask]) >\n np.mean(data_filt[:, :, stop_mask]) * 5)\n\n\ndef test_filter(tmp_path):\n \"\"\"Test filtering.\"\"\"\n h_freq = 40.\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events, event_id, tmin, tmax)\n assert round(epochs.info['lowpass']) == 172\n pytest.raises(RuntimeError, epochs.savgol_filter, 10.)\n epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)\n epochs.pick_types(meg='grad')\n freqs = rfftfreq(len(epochs.times), 1. / epochs.info['sfreq'])\n data_fft = np.abs(rfft(epochs.get_data()))\n pass_mask = (freqs <= h_freq / 2. 
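# Illustrative sketch (not part of this test module): low-pass filtering
# preloaded epochs in place, the operation whose pass-band/stop-band
# behaviour the surrounding test_filter verifies. Cutoff and data are
# arbitrary example values.
import numpy as np
import mne

demo_info = mne.create_info(1, 1000., 'eeg')
demo_data = np.random.RandomState(0).randn(3, 1, 1000) * 1e-6
demo_epochs = mne.EpochsArray(demo_data, demo_info)

demo_epochs.filter(l_freq=None, h_freq=40.)   # 40 Hz low-pass, in place
assert demo_epochs.info['lowpass'] == 40.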
- 5.)\n stop_mask = (freqs >= h_freq * 2 + 5.)\n epochs_orig = epochs.copy()\n epochs.filter(None, h_freq)\n assert epochs.info['lowpass'] == h_freq\n data_filt = epochs.get_data()\n data_filt_fft = np.abs(rfft(data_filt))\n # decent in pass-band\n assert_allclose(np.mean(data_filt_fft[:, :, pass_mask], 0),\n np.mean(data_fft[:, :, pass_mask], 0),\n rtol=5e-2, atol=1e-16)\n # suppression in stop-band\n assert (np.mean(data_fft[:, :, stop_mask]) >\n np.mean(data_filt_fft[:, :, stop_mask]) * 10)\n\n # smoke test for filtering I/O data (gh-5614)\n temp_fname = op.join(str(tmp_path), 'test-epo.fif')\n epochs_orig.save(temp_fname, overwrite=True)\n epochs = mne.read_epochs(temp_fname)\n epochs.filter(None, h_freq)\n assert_allclose(epochs.get_data(), data_filt, atol=1e-17)\n\n\ndef test_epochs_hash():\n \"\"\"Test epoch hashing.\"\"\"\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events, event_id, tmin, tmax)\n pytest.raises(RuntimeError, epochs.__hash__)\n epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)\n assert_equal(hash(epochs), hash(epochs))\n epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)\n assert_equal(hash(epochs), hash(epochs_2))\n # do NOT use assert_equal here, failing output is terrible\n assert (pickle.dumps(epochs) == pickle.dumps(epochs_2))\n\n epochs_2._data[0, 0, 0] -= 1\n assert hash(epochs) != hash(epochs_2)\n\n\ndef test_event_ordering():\n \"\"\"Test event order.\"\"\"\n raw, events = _get_data()[:2]\n events2 = events.copy()[::-1]\n Epochs(raw, events, event_id, tmin, tmax, reject=reject, flat=flat)\n with pytest.warns(RuntimeWarning, match='chronologically'):\n Epochs(raw, events2, event_id, tmin, tmax, reject=reject, flat=flat)\n # Duplicate events should be an error...\n events2 = events[[0, 0]]\n events2[:, 2] = [1, 2]\n pytest.raises(RuntimeError, Epochs, raw, events2, event_id=None)\n # But only if duplicates are actually used by event_id\n assert_equal(len(Epochs(raw, events2, event_id=dict(a=1), preload=True)),\n 1)\n\n\ndef test_events_type():\n \"\"\"Test type of events.\"\"\"\n raw, events = _get_data()[:2]\n events_id = {'A': 1, 'B': 2}\n events = (events, events_id)\n with pytest.raises(TypeError, match='events should be a NumPy array'):\n Epochs(raw, events, event_id, tmin, tmax)\n\n\ndef test_rescale():\n \"\"\"Test rescale.\"\"\"\n data = np.array([2, 3, 4, 5], float)\n times = np.array([0, 1, 2, 3], float)\n baseline = (0, 2)\n tester = partial(rescale, data=data, times=times, baseline=baseline)\n assert_allclose(tester(mode='mean'), [-1, 0, 1, 2])\n assert_allclose(tester(mode='ratio'), data / 3.)\n assert_allclose(tester(mode='logratio'), np.log10(data / 3.))\n assert_allclose(tester(mode='percent'), (data - 3) / 3.)\n assert_allclose(tester(mode='zscore'), (data - 3) / np.std([2, 3, 4]))\n x = data / 3.\n x = np.log10(x)\n s = np.std(x[:3])\n assert_allclose(tester(mode='zlogratio'), x / s)\n\n\[email protected]('preload', (True, False))\ndef test_epochs_baseline(preload):\n \"\"\"Test baseline and rescaling modes with and without preloading.\"\"\"\n data = np.array([[2, 3], [2, 3]], float)\n info = create_info(2, 1000., ('eeg', 'misc'))\n raw = RawArray(data, info)\n events = np.array([[0, 0, 1]])\n\n epochs = mne.Epochs(raw, events, None, 0, 1e-3, baseline=None,\n preload=preload)\n epochs.drop_bad()\n epochs_data = epochs.get_data()\n assert epochs_data.shape == (1, 2, 2)\n expected = data.copy()\n assert_array_equal(epochs_data[0], expected)\n # the baseline period (1 sample here)\n 
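# Illustrative sketch (not part of this test module): apply_baseline()
# subtracts, channel-wise, the mean over the baseline interval from every
# time point, which is what the expectations in test_epochs_baseline encode.
# Values below are arbitrary and chosen so the arithmetic is easy to follow.
import numpy as np
import mne

demo_info = mne.create_info(['ch0'], sfreq=10., ch_types='eeg')
demo_data = np.arange(10, dtype=float).reshape(1, 1, 10)  # 0, 1, ..., 9
demo_epochs = mne.EpochsArray(demo_data, demo_info, tmin=-0.5)  # t = -0.5..0.4

demo_epochs.apply_baseline((None, 0))  # baseline samples 0..5, mean = 2.5
print(demo_epochs.get_data()[0, 0])    # [-2.5 -1.5 -0.5  0.5  1.5  2.5 ...]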
epochs.apply_baseline((0, 0))\n expected[0] = [0, 1]\n if preload:\n assert_allclose(epochs_data[0][0], expected[0])\n else:\n assert_allclose(epochs_data[0][0], expected[1])\n assert_allclose(epochs.get_data()[0], expected, atol=1e-7)\n # entire interval\n epochs.apply_baseline((None, None))\n expected[0] = [-0.5, 0.5]\n assert_allclose(epochs.get_data()[0], expected)\n\n # Preloading applies baseline correction.\n if preload:\n assert epochs._do_baseline is False\n else:\n assert epochs._do_baseline is True\n\n # we should not be able to remove baseline correction after the data\n # has been loaded\n epochs.apply_baseline((None, None))\n if preload:\n with pytest.raises(RuntimeError,\n match='You cannot remove baseline correction'):\n epochs.apply_baseline(None)\n else:\n epochs.apply_baseline(None)\n assert epochs.baseline is None\n\n\ndef test_epochs_bad_baseline():\n \"\"\"Test Epochs initialization with bad baseline parameters.\"\"\"\n raw, events = _get_data()[:2]\n\n with pytest.raises(ValueError, match='interval.*outside of epochs data'):\n epochs = Epochs(raw, events, None, -0.1, 0.3, (-0.2, 0))\n\n with pytest.raises(ValueError, match='interval.*outside of epochs data'):\n epochs = Epochs(raw, events, None, -0.1, 0.3, (0, 0.4))\n\n pytest.raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0.1, 0))\n pytest.raises(ValueError, Epochs, raw, events, None, 0.1, 0.3, (None, 0))\n pytest.raises(ValueError, Epochs, raw, events, None, -0.3, -0.1, (0, None))\n epochs = Epochs(raw, events, None, 0.1, 0.3, baseline=None)\n epochs.load_data()\n pytest.raises(ValueError, epochs.apply_baseline, (None, 0))\n pytest.raises(ValueError, epochs.apply_baseline, (0, None))\n # put some rescale options here, too\n data = np.arange(100, dtype=float)\n pytest.raises(ValueError, rescale, data, times=data, baseline=(-2, -1))\n rescale(data.copy(), times=data, baseline=(2, 2)) # ok\n pytest.raises(ValueError, rescale, data, times=data, baseline=(2, 1))\n pytest.raises(ValueError, rescale, data, times=data, baseline=(100, 101))\n\n\ndef test_epoch_combine_ids():\n \"\"\"Test combining event ids in epochs compared to events.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,\n 'd': 4, 'e': 5, 'f': 32},\n tmin, tmax, picks=picks, preload=False)\n events_new = merge_events(events, [1, 2], 12)\n epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})\n assert_equal(epochs_new['ab']._name, 'ab')\n assert_array_equal(events_new, epochs_new.events)\n # should probably add test + functionality for non-replacement XXX\n\n\ndef test_epoch_multi_ids():\n \"\"\"Test epoch selection via multiple/partial keys.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,\n 'b/d': 4, 'a_b': 5},\n tmin, tmax, picks=picks, preload=False)\n epochs_regular = epochs['a/b']\n epochs_reverse = epochs['b/a']\n epochs_multi = epochs[['a/b/a', 'a/b/b']]\n assert_array_equal(epochs_multi.events, epochs_regular.events)\n assert_array_equal(epochs_reverse.events, epochs_regular.events)\n assert_allclose(epochs_multi.get_data(), epochs_regular.get_data())\n assert_allclose(epochs_reverse.get_data(), epochs_regular.get_data())\n\n\ndef test_read_epochs_bad_events():\n \"\"\"Test epochs when events are at the beginning or the end of the file.\"\"\"\n raw, events, picks = _get_data()\n # Event at the beginning\n epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),\n event_id, tmin, tmax, picks=picks)\n with 
pytest.warns(RuntimeWarning, match='empty'):\n evoked = epochs.average()\n\n epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),\n event_id, tmin, tmax, picks=picks)\n assert (repr(epochs)) # test repr\n assert (epochs._repr_html_()) # test _repr_html_\n epochs.drop_bad()\n assert (repr(epochs))\n assert (epochs._repr_html_())\n with pytest.warns(RuntimeWarning, match='empty'):\n evoked = epochs.average()\n\n # Event at the end\n epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),\n event_id, tmin, tmax, picks=picks)\n\n with pytest.warns(RuntimeWarning, match='empty'):\n evoked = epochs.average()\n assert evoked\n\n\ndef test_io_epochs_basic(tmp_path):\n \"\"\"Test epochs from raw files with IO as fif file.\"\"\"\n raw, events, picks = _get_data(preload=True)\n baseline = (None, 0)\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=baseline, preload=True)\n evoked = epochs.average()\n data = epochs.get_data()\n\n # Bad tmin/tmax parameters\n with pytest.raises(ValueError,\n match='tmin has to be less than or equal to tmax'):\n Epochs(raw, events, event_id, tmax, tmin, baseline=None)\n\n epochs_no_id = Epochs(raw, pick_events(events, include=event_id),\n None, tmin, tmax, picks=picks)\n assert_array_equal(data, epochs_no_id.get_data())\n\n eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,\n eog=True, exclude='bads')\n eog_ch_names = [raw.ch_names[k] for k in eog_picks]\n epochs.drop_channels(eog_ch_names)\n assert (len(epochs.info['chs']) == len(epochs.ch_names) ==\n epochs.get_data().shape[1])\n data_no_eog = epochs.get_data()\n assert (data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))\n\n # test decim kwarg\n with pytest.warns(RuntimeWarning, match='aliasing'):\n epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n decim=2)\n\n # decim without\n with epochs_dec.info._unlock():\n epochs_dec.info['lowpass'] = None\n with pytest.warns(RuntimeWarning, match='aliasing'):\n epochs_dec.decimate(2)\n\n data_dec = epochs_dec.get_data()\n assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,\n atol=1e-12)\n\n evoked_dec = epochs_dec.average()\n assert_allclose(evoked.data[:, epochs_dec._decim_slice],\n evoked_dec.data, rtol=1e-12, atol=1e-17)\n\n n = evoked.data.shape[1]\n n_dec = evoked_dec.data.shape[1]\n n_dec_min = n // 4\n assert (n_dec_min <= n_dec <= n_dec_min + 1)\n assert (evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)\n\n\[email protected]('proj', [\n pytest.param(True, marks=pytest.mark.slowtest),\n pytest.param('delayed', marks=pytest.mark.slowtest),\n False,\n])\ndef test_epochs_io_proj(tmp_path, proj):\n \"\"\"Test epochs I/O with projection.\"\"\"\n # Test event access on non-preloaded data (#2345)\n\n # due to reapplication of the proj matrix, this is our quality limit\n # for some tests\n tols = dict(atol=1e-3, rtol=1e-20)\n\n raw, events, picks = _get_data()\n events[::2, 1] = 1\n events[1::2, 2] = 2\n event_ids = dict(a=1, b=2)\n temp_fname = tmp_path / 'test-epo.fif'\n\n epochs = Epochs(raw, events, event_ids, tmin, tmax, picks=picks,\n proj=proj, reject=reject, flat=dict(),\n reject_tmin=tmin + 0.01, reject_tmax=tmax - 0.01)\n assert_equal(epochs.proj, proj if proj != 'delayed' else False)\n data1 = epochs.get_data()\n epochs2 = epochs.copy().apply_proj()\n assert_equal(epochs2.proj, True)\n data2 = epochs2.get_data()\n assert_allclose(data1, data2, **tols)\n epochs.save(temp_fname, overwrite=True)\n epochs_read = read_epochs(temp_fname, preload=False)\n 
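# Illustrative sketch (not part of this test module): the plain save /
# read_epochs round trip that the I/O tests here build on. The '-epo.fif'
# suffix is the expected naming convention; path and data are arbitrary.
import tempfile
import os.path as op
import numpy as np
import mne

demo_info = mne.create_info(2, 100., 'eeg')
demo_data = np.random.RandomState(0).randn(3, 2, 50) * 1e-6
demo_epochs = mne.EpochsArray(demo_data, demo_info)

with tempfile.TemporaryDirectory() as demo_dir:
    demo_fname = op.join(demo_dir, 'demo-epo.fif')
    demo_epochs.save(demo_fname, overwrite=True)
    demo_read = mne.read_epochs(demo_fname, preload=True)
    # data are stored as float32 on disk, so compare with a loose tolerance
    np.testing.assert_allclose(demo_epochs.get_data(), demo_read.get_data(),
                               rtol=1e-5)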
assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)\n assert_allclose(epochs['a'].get_data(),\n epochs_read['a'].get_data(), **tols)\n assert_allclose(epochs['b'].get_data(),\n epochs_read['b'].get_data(), **tols)\n assert epochs.reject is not None\n assert object_diff(epochs.reject, reject) == ''\n assert epochs.flat is None # empty dict is functionally the same\n assert epochs.reject_tmin == tmin + 0.01\n assert epochs.reject_tmax == tmax - 0.01\n\n # ensure we don't leak file descriptors\n epochs_read = read_epochs(temp_fname, preload=False)\n epochs_copy = epochs_read.copy()\n del epochs_read\n epochs_copy.get_data()\n del epochs_copy\n\n\[email protected]\[email protected]('preload', (False, True))\ndef test_epochs_io_preload(tmp_path, preload):\n \"\"\"Test epochs I/O with preloading.\"\"\"\n # due to reapplication of the proj matrix, this is our quality limit\n # for some tests\n tols = dict(atol=1e-3, rtol=1e-20)\n\n raw, events, picks = _get_data(preload=preload)\n tempdir = str(tmp_path)\n temp_fname = op.join(tempdir, 'test-epo.fif')\n temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')\n baseline = (None, 0)\n with catch_logging() as log:\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=baseline, preload=True, verbose=True)\n log = log.getvalue()\n msg = 'Not setting metadata'\n assert log.count(msg) == 1, f'\\nto find:\\n{msg}\\n\\nlog:\\n{log}'\n load_msg = 'Loading data for 7 events and 421 original time points ...'\n if preload:\n load_msg = ('Using data from preloaded Raw for 7 events and 421 '\n 'original time points ...')\n assert log.count(load_msg) == 1, f'\\nto find:\\n{load_msg}\\n\\nlog:\\n{log}'\n\n evoked = epochs.average()\n epochs.save(temp_fname, overwrite=True)\n\n epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=None, preload=True)\n assert epochs_no_bl.baseline is None\n epochs_no_bl.save(temp_fname_no_bl, overwrite=True)\n\n epochs_read = read_epochs(temp_fname, preload=preload)\n epochs_no_bl.save(temp_fname_no_bl, overwrite=True)\n epochs_read = read_epochs(temp_fname)\n epochs_no_bl_read = read_epochs(temp_fname_no_bl)\n with pytest.raises(ValueError, match='invalid'):\n epochs.apply_baseline(baseline=[1, 2, 3])\n epochs_with_bl = epochs_no_bl_read.copy().apply_baseline(baseline)\n assert (isinstance(epochs_with_bl, BaseEpochs))\n assert (epochs_with_bl.baseline == (epochs_no_bl_read.tmin, baseline[1]))\n assert (epochs_no_bl_read.baseline != baseline)\n assert (str(epochs_read).startswith('<Epochs'))\n\n epochs_no_bl_read.apply_baseline(baseline)\n assert_array_equal(epochs_no_bl_read.times, epochs.times)\n assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())\n assert_array_almost_equal(epochs.get_data(),\n epochs_no_bl_read.get_data())\n assert_array_equal(epochs_read.times, epochs.times)\n assert_array_almost_equal(epochs_read.average().data, evoked.data)\n assert_equal(epochs_read.proj, epochs.proj)\n bmin, bmax = epochs.baseline\n if bmin is None:\n bmin = epochs.times[0]\n if bmax is None:\n bmax = epochs.times[-1]\n baseline = (bmin, bmax)\n assert_array_almost_equal(epochs_read.baseline, baseline)\n assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)\n assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)\n assert_equal(epochs_read.event_id, epochs.event_id)\n\n epochs.event_id.pop('1')\n epochs.event_id.update({'a:a': 1}) # test allow for ':' in key\n fname_temp = op.join(tempdir, 'foo-epo.fif')\n epochs.save(fname_temp, 
overwrite=True)\n epochs_read = read_epochs(fname_temp, preload=preload)\n assert_equal(epochs_read.event_id, epochs.event_id)\n assert_equal(epochs_read['a:a'].average().comment, 'a:a')\n\n # now use a baseline, crop it out, and I/O round trip afterward\n assert epochs.times[0] < 0\n assert epochs.times[-1] > 0\n epochs.apply_baseline((None, 0))\n baseline_before_crop = (epochs.times[0], 0)\n epochs.crop(1. / epochs.info['sfreq'], None)\n # baseline shouldn't be modified by crop()\n assert epochs.baseline == baseline_before_crop\n epochs.save(fname_temp, overwrite=True)\n epochs_read = read_epochs(fname_temp, preload=preload)\n assert_allclose(epochs_read.baseline, baseline_before_crop)\n\n assert_allclose(epochs.get_data(), epochs_read.get_data(),\n rtol=6e-4) # XXX this rtol should be better...?\n del epochs, epochs_read\n\n # add reject here so some of the epochs get dropped\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n reject=reject)\n epochs.save(temp_fname, overwrite=True)\n # ensure bad events are not saved\n epochs_read3 = read_epochs(temp_fname, preload=preload)\n assert_array_equal(epochs_read3.events, epochs.events)\n data = epochs.get_data()\n assert (epochs_read3.events.shape[0] == data.shape[0])\n\n # test copying loaded one (raw property)\n epochs_read4 = epochs_read3.copy()\n assert_array_almost_equal(epochs_read4.get_data(), data)\n # test equalizing loaded one (drop_log property)\n epochs_read4.equalize_event_counts(epochs.event_id)\n\n epochs.drop([1, 2], reason='can we recover orig ID?')\n epochs.save(temp_fname, overwrite=True)\n epochs_read5 = read_epochs(temp_fname, preload=preload)\n assert_array_equal(epochs_read5.selection, epochs.selection)\n assert_equal(len(epochs_read5.selection), len(epochs_read5.events))\n assert epochs_read5.drop_log == epochs.drop_log\n\n if preload:\n # Test that one can drop channels on read file\n epochs_read5.drop_channels(epochs_read5.ch_names[:1])\n\n # test warnings on bad filenames\n epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')\n with pytest.warns(RuntimeWarning, match='-epo.fif'):\n epochs.save(epochs_badname, overwrite=True)\n with pytest.warns(RuntimeWarning, match='-epo.fif'):\n read_epochs(epochs_badname, preload=preload)\n\n # test loading epochs with missing events\n epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax,\n picks=picks, on_missing='ignore')\n epochs.save(temp_fname, overwrite=True)\n _assert_splits(temp_fname, 0, np.inf)\n epochs_read = read_epochs(temp_fname, preload=preload)\n assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)\n assert_array_equal(epochs.events, epochs_read.events)\n assert_equal(set(epochs.event_id.keys()),\n {str(x) for x in epochs_read.event_id.keys()})\n\n # test saving split epoch files\n split_size = '7MB'\n # ensure that we're in a position where just the data itself could fit\n # if that were all that we saved ...\n split_size_bytes = _get_split_size(split_size)\n assert epochs.get_data().nbytes // 2 < split_size_bytes\n epochs.save(temp_fname, split_size=split_size, overwrite=True)\n # ... but we correctly account for the other stuff we need to write,\n # so end up with two files ...\n _assert_splits(temp_fname, 1, split_size_bytes)\n epochs_read = read_epochs(temp_fname, preload=preload)\n # ... 
and none of the files exceed our limit.\n _assert_splits(temp_fname, 1, split_size_bytes)\n assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)\n assert_array_equal(epochs.events, epochs_read.events)\n assert_array_equal(epochs.selection, epochs_read.selection)\n assert epochs.drop_log == epochs_read.drop_log\n\n # Test that having a single time point works\n assert epochs.baseline is not None\n baseline_before_crop = epochs.baseline\n epochs.load_data().crop(0, 0)\n assert epochs.baseline == baseline_before_crop\n assert_equal(len(epochs.times), 1)\n assert_equal(epochs.get_data().shape[-1], 1)\n epochs.save(temp_fname, overwrite=True)\n epochs_read = read_epochs(temp_fname, preload=preload)\n assert_equal(len(epochs_read.times), 1)\n assert_equal(epochs.get_data().shape[-1], 1)\n\n\[email protected]('split_size, n_epochs, n_files, size', [\n ('1.5MB', 9, 6, 1572864),\n ('3MB', 18, 3, 3 * 1024 * 1024),\n])\[email protected]('metadata', [\n False,\n pytest.param(True, marks=pytest.mark.skipif(\n not check_version('pandas'), reason='Requires Pandas'))\n])\[email protected]('concat', (False, True))\ndef test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata,\n concat):\n \"\"\"Test saving split epochs.\"\"\"\n # See gh-5102\n fs = 1000.\n n_times = int(round(fs * (n_epochs + 1)))\n raw = mne.io.RawArray(np.random.RandomState(0).randn(100, n_times),\n mne.create_info(100, 1000.))\n events = mne.make_fixed_length_events(raw, 1)\n epochs = mne.Epochs(raw, events)\n if split_size == '2MB' and (metadata or concat):\n n_files += 1\n if metadata:\n from pandas import DataFrame\n junk = ['*' * 10000 for _ in range(len(events))]\n metadata = DataFrame({\n 'event_time': events[:, 0] / raw.info['sfreq'],\n 'trial_number': range(len(events)),\n 'junk': junk})\n epochs.metadata = metadata\n if concat:\n epochs.drop_bad()\n epochs = concatenate_epochs([epochs[ii] for ii in range(len(epochs))])\n epochs_data = epochs.get_data()\n assert len(epochs) == n_epochs\n fname = tmp_path / 'test-epo.fif'\n epochs.save(fname, split_size=split_size, overwrite=True)\n got_size = _get_split_size(split_size)\n assert got_size == size\n _assert_splits(fname, n_files, size)\n assert not op.isfile(f'{str(fname)[:-4]}-{n_files + 1}.fif')\n for preload in (True, False):\n epochs2 = mne.read_epochs(fname, preload=preload)\n assert_allclose(epochs2.get_data(), epochs_data)\n assert_array_equal(epochs.events, epochs2.events)\n\n # Check that if BIDS is used and no split is needed it defaults to\n # simple writing without _split- entity.\n split_fname = str(tmp_path / 'test_epo.fif')\n split_fname_neuromag_part1 = split_fname.replace(\n 'epo.fif', f'epo-{n_files + 1}.fif')\n split_fname_bids_part1 = split_fname.replace(\n '_epo', f'_split-{n_files + 1:02d}_epo')\n\n epochs.save(split_fname, split_naming='bids', verbose=True)\n assert op.isfile(split_fname)\n assert not op.isfile(split_fname_bids_part1)\n for split_naming in ('neuromag', 'bids'):\n with pytest.raises(FileExistsError, match='Destination file'):\n epochs.save(split_fname, split_naming=split_naming, verbose=True)\n os.remove(split_fname)\n # we don't test for reserved files as it's not implemented here\n\n epochs.save(split_fname, split_size='1.4MB', verbose=True)\n # check that the filenames match the intended pattern\n assert op.isfile(split_fname)\n assert op.isfile(split_fname_neuromag_part1)\n # check that filenames are being formatted correctly for BIDS\n epochs.save(split_fname, split_size='1.4MB', split_naming='bids',\n 
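# Illustrative sketch (not part of this test module): splitting an epochs
# file once it would exceed ``split_size``, as test_split_saving exercises
# above. Sizes, channel count and naming are arbitrary; as the tests note,
# each split also carries per-file overhead beyond the raw data itself.
import tempfile
import os.path as op
import numpy as np
import mne

demo_info = mne.create_info(64, 1000., 'eeg')
demo_data = np.random.RandomState(0).randn(40, 64, 1000) * 1e-6
demo_epochs = mne.EpochsArray(demo_data, demo_info)  # ~10 MB as float32

with tempfile.TemporaryDirectory() as demo_dir:
    demo_fname = op.join(demo_dir, 'demo-epo.fif')
    # 'neuromag' naming appends -1.fif, -2.fif, ...; 'bids' uses _split-XX
    demo_epochs.save(demo_fname, split_size='5MB', split_naming='neuromag',
                     overwrite=True)
    demo_read = mne.read_epochs(demo_fname)  # reading follows the splits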
overwrite=True, verbose=True)\n assert op.isfile(split_fname_bids_part1)\n\n\[email protected]\ndef test_split_many_reset(tmp_path):\n \"\"\"Test splitting with many events and using reset.\"\"\"\n data = np.zeros((1000, 1, 1024)) # 1 ch, 1024 samples\n assert data[0, 0].nbytes == 8192 # 8 kB per epoch\n info = mne.create_info(1, 1000., 'eeg')\n selection = np.arange(len(data)) + 100000\n epochs = EpochsArray(data, info, tmin=0., selection=selection)\n assert len(epochs.drop_log) == 101000\n assert len(epochs) == len(data) == len(epochs.events)\n fname = tmp_path / 'temp-epo.fif'\n for split_size in ('0.5MB', '1MB', '2MB'): # tons of overhead from sel\n with pytest.raises(ValueError, match='too small to safely'):\n epochs.save(fname, split_size=split_size, verbose='debug')\n with pytest.raises(ValueError, match='would result in writing'): # ~200\n epochs.save(fname, split_size='2.27MB', verbose='debug')\n with pytest.warns(RuntimeWarning, match='writing overhead'):\n epochs.save(fname, split_size='3MB', verbose='debug')\n epochs_read = read_epochs(fname)\n assert_allclose(epochs.get_data(), epochs_read.get_data())\n assert epochs.drop_log == epochs_read.drop_log\n mb = 3 * 1024 * 1024\n _assert_splits(fname, 6, mb)\n # reset, then it should work\n fname = tmp_path / 'temp-reset-epo.fif'\n epochs.reset_drop_log_selection()\n epochs.save(fname, split_size=split_size, verbose='debug')\n _assert_splits(fname, 4, mb)\n epochs_read = read_epochs(fname)\n assert_allclose(epochs.get_data(), epochs_read.get_data())\n\n\ndef _assert_splits(fname, n, size):\n __tracebackhide__ = True\n assert n >= 0\n next_fnames = [str(fname)] + [\n str(fname)[:-4] + '-%d.fif' % ii for ii in range(1, n + 2)]\n bad_fname = next_fnames.pop(-1)\n for ii, this_fname in enumerate(next_fnames[:-1]):\n assert op.isfile(this_fname), f'Missing file: {this_fname}'\n with open(this_fname, 'r') as fid:\n fid.seek(0, 2)\n file_size = fid.tell()\n min_ = 0.1 if ii < len(next_fnames) - 1 else 0.1\n assert size * min_ < file_size <= size, f'{this_fname}'\n assert not op.isfile(bad_fname), f'Errantly wrote {bad_fname}'\n\n\ndef test_epochs_proj(tmp_path):\n \"\"\"Test handling projection (apply proj in Raw or in Epochs).\"\"\"\n tempdir = str(tmp_path)\n raw, events, picks = _get_data()\n exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more\n this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,\n eog=True, exclude=exclude)\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,\n proj=True)\n assert (all(p['active'] is True for p in epochs.info['projs']))\n evoked = epochs.average()\n assert (all(p['active'] is True for p in evoked.info['projs']))\n data = epochs.get_data()\n\n raw_proj = read_raw_fif(raw_fname).apply_proj()\n epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,\n picks=this_picks, proj=False)\n\n data_no_proj = epochs_no_proj.get_data()\n assert (all(p['active'] is True for p in epochs_no_proj.info['projs']))\n evoked_no_proj = epochs_no_proj.average()\n assert (all(p['active'] is True for p in evoked_no_proj.info['projs']))\n assert (epochs_no_proj.proj is True) # as projs are active from Raw\n\n assert_array_almost_equal(data, data_no_proj, decimal=8)\n\n # make sure we can exclude avg ref\n this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,\n eog=True, exclude=exclude)\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,\n proj=True)\n epochs.set_eeg_reference(projection=True).apply_proj()\n assert 
(_has_eeg_average_ref_proj(epochs.info['projs']))\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,\n proj=True)\n assert (not _has_eeg_average_ref_proj(epochs.info['projs']))\n\n # make sure we don't add avg ref when a custom ref has been applied\n with raw.info._unlock():\n raw.info['custom_ref_applied'] = True\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,\n proj=True)\n assert (not _has_eeg_average_ref_proj(epochs.info['projs']))\n\n # From GH#2200:\n # This has no problem\n proj = raw.info['projs']\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,\n proj=False)\n with epochs.info._unlock():\n epochs.info['projs'] = []\n data = epochs.copy().add_proj(proj).apply_proj().get_data()\n # save and reload data\n fname_epo = op.join(tempdir, 'temp-epo.fif')\n epochs.save(fname_epo, overwrite=True) # Save without proj added\n epochs_read = read_epochs(fname_epo)\n epochs_read.add_proj(proj)\n epochs_read.apply_proj() # This used to bomb\n data_2 = epochs_read.get_data() # Let's check the result\n assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)\n\n # adding EEG ref (GH #2727)\n raw = read_raw_fif(raw_fname)\n raw.add_proj([], remove_existing=True)\n raw.info['bads'] = ['MEG 2443', 'EEG 053']\n picks = pick_types(raw.info, meg=False, eeg=True, stim=True, eog=False,\n exclude='bads')\n epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,\n preload=True)\n epochs.pick_channels(['EEG 001', 'EEG 002'])\n assert_equal(len(epochs), 7) # sufficient for testing\n temp_fname = op.join(tempdir, 'test-epo.fif')\n epochs.save(temp_fname, overwrite=True)\n for preload in (True, False):\n epochs = read_epochs(temp_fname, proj=False, preload=preload)\n epochs.set_eeg_reference(projection=True).apply_proj()\n assert_allclose(epochs.get_data().mean(axis=1), 0, atol=1e-15)\n epochs = read_epochs(temp_fname, proj=False, preload=preload)\n epochs.set_eeg_reference(projection=True)\n pytest.raises(AssertionError, assert_allclose,\n epochs.get_data().mean(axis=1), 0., atol=1e-15)\n epochs.apply_proj()\n assert_allclose(epochs.get_data().mean(axis=1), 0, atol=1e-15)\n\n\ndef test_evoked_arithmetic():\n \"\"\"Test arithmetic of evoked data.\"\"\"\n raw, events, picks = _get_data()\n epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks)\n evoked1 = epochs1.average()\n epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks)\n evoked2 = epochs2.average()\n epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks)\n evoked = epochs.average()\n evoked_avg = combine_evoked([evoked1, evoked2], weights='nave')\n assert_array_equal(evoked.data, evoked_avg.data)\n assert_array_equal(evoked.times, evoked_avg.times)\n assert_equal(evoked_avg.nave, evoked1.nave + evoked2.nave)\n\n\ndef test_evoked_io_from_epochs(tmp_path):\n \"\"\"Test IO of evoked data made from epochs.\"\"\"\n tempdir = str(tmp_path)\n raw, events, picks = _get_data()\n with raw.info._unlock():\n raw.info['lowpass'] = 40 # avoid aliasing warnings\n # offset our tmin so we don't get exactly a zero value when decimating\n with catch_logging() as log:\n epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,\n picks=picks, decim=5, preload=True, verbose=True)\n log = log.getvalue()\n load_msg = ('Loading data for 1 events and 415 original time points '\n '(prior to decimation) ...')\n assert log.count(load_msg) == 1, f'\\nto find:\\n{load_msg}\\n\\nlog:\\n{log}'\n evoked = epochs.average()\n with 
evoked.info._unlock():\n # Test that empty string shortcuts to None.\n evoked.info['proj_name'] = ''\n fname_temp = op.join(tempdir, 'evoked-ave.fif')\n evoked.save(fname_temp)\n evoked2 = read_evokeds(fname_temp)[0]\n assert_equal(evoked2.info['proj_name'], None)\n assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)\n assert_allclose(evoked.times, evoked2.times, rtol=1e-4,\n atol=1 / evoked.info['sfreq'])\n\n # now let's do one with negative time\n baseline = (0.1, 0.2)\n epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,\n picks=picks, baseline=baseline, decim=5)\n evoked = epochs.average()\n assert_allclose(evoked.baseline, baseline)\n evoked.save(fname_temp, overwrite=True)\n evoked2 = read_evokeds(fname_temp)[0]\n assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)\n assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)\n assert_allclose(evoked.baseline, baseline)\n\n # should be equivalent to a cropped original\n baseline = (0.1, 0.2)\n epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,\n picks=picks, baseline=baseline, decim=5)\n evoked = epochs.average()\n evoked.crop(0.099, None)\n assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)\n assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)\n assert_allclose(evoked.baseline, baseline)\n\n # should work when one channel type is changed to a non-data ch\n picks = pick_types(raw.info, meg=True, eeg=True)\n epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,\n picks=picks, baseline=(0.1, 0.2), decim=5)\n with pytest.warns(RuntimeWarning, match='unit for.*changed from'):\n epochs.set_channel_types({epochs.ch_names[0]: 'syst'})\n evokeds = list()\n for picks in (None, 'all'):\n evoked = epochs.average(picks)\n evokeds.append(evoked)\n evoked.save(fname_temp, overwrite=True)\n evoked2 = read_evokeds(fname_temp)[0]\n start = 1 if picks is None else 0\n for ev in (evoked, evoked2):\n assert ev.ch_names == epochs.ch_names[start:]\n assert_allclose(ev.data, epochs.get_data().mean(0)[start:])\n with pytest.raises(ValueError, match='.*nchan.* must match'):\n write_evokeds(fname_temp, evokeds, overwrite=True)\n\n\ndef test_evoked_standard_error(tmp_path):\n \"\"\"Test calculation and read/write of standard error.\"\"\"\n raw, events, picks = _get_data()\n tempdir = str(tmp_path)\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks)\n evoked = [epochs.average(), epochs.standard_error()]\n write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)\n evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])\n evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), '1'),\n read_evokeds(op.join(tempdir, 'evoked-ave.fif'), '1',\n kind='standard_error')]\n for evoked_new in [evoked2, evoked3]:\n assert (evoked_new[0]._aspect_kind ==\n FIFF.FIFFV_ASPECT_AVERAGE)\n assert (evoked_new[0].kind == 'average')\n assert (evoked_new[1]._aspect_kind ==\n FIFF.FIFFV_ASPECT_STD_ERR)\n assert (evoked_new[1].kind == 'standard_error')\n for ave, ave2 in zip(evoked, evoked_new):\n assert_array_almost_equal(ave.data, ave2.data)\n assert_array_almost_equal(ave.times, ave2.times)\n assert ave.nave == ave2.nave\n assert ave._aspect_kind == ave2._aspect_kind\n assert ave.kind == ave2.kind\n assert ave.last == ave2.last\n assert ave.first == ave2.first\n\n\ndef test_reject_epochs(tmp_path):\n \"\"\"Test of epochs rejection.\"\"\"\n tempdir = str(tmp_path)\n temp_fname = op.join(tempdir, 'test-epo.fif')\n\n raw, events, picks = _get_data()\n events1 = events[events[:, 2] 
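# Illustrative sketch (not part of this test module): collapsing epochs into
# Evoked containers, the objects whose I/O and standard error the tests
# above cover. Data are arbitrary; the nave bookkeeping follows
# test_evoked_arithmetic above.
import numpy as np
import mne

demo_info = mne.create_info(2, 100., 'eeg')
demo_data = np.random.RandomState(0).randn(8, 2, 50) * 1e-6
demo_epochs = mne.EpochsArray(demo_data, demo_info)

demo_ave = demo_epochs.average()           # kind='average'
demo_sem = demo_epochs.standard_error()    # kind='standard_error'
demo_comb = mne.combine_evoked([demo_epochs[:4].average(),
                                demo_epochs[4:].average()], weights='nave')
assert demo_comb.nave == demo_ave.nave == 8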
== event_id]\n epochs = Epochs(raw, events1, event_id, tmin, tmax,\n reject=reject, flat=flat)\n pytest.raises(RuntimeError, len, epochs)\n n_events = len(epochs.events)\n data = epochs.get_data()\n n_clean_epochs = len(data)\n # Should match\n # mne_process_raw --raw test_raw.fif --projoff \\\n # --saveavetag -ave --ave test.ave --filteroff\n assert n_events > n_clean_epochs\n assert n_clean_epochs == 3\n assert epochs.drop_log == ((), (), (), ('MEG 2443',), ('MEG 2443',),\n ('MEG 2443',), ('MEG 2443',))\n\n # Ensure epochs are not dropped based on a bad channel\n raw_2 = raw.copy()\n raw_2.info['bads'] = ['MEG 2443']\n reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)\n epochs = Epochs(raw_2, events1, event_id, tmin, tmax,\n reject=reject_crazy, flat=flat)\n epochs.drop_bad()\n\n assert (all('MEG 2442' in e for e in epochs.drop_log))\n assert (all('MEG 2443' not in e for e in epochs.drop_log))\n\n # Invalid reject_tmin/reject_tmax/detrend\n pytest.raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,\n reject_tmin=1., reject_tmax=0)\n pytest.raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,\n reject_tmin=tmin - 1, reject_tmax=1.)\n pytest.raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,\n reject_tmin=0., reject_tmax=tmax + 1)\n\n epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,\n reject=reject, flat=flat, reject_tmin=0., reject_tmax=.1)\n data = epochs.get_data()\n n_clean_epochs = len(data)\n assert n_clean_epochs == 7\n assert len(epochs) == 7\n assert epochs.times[epochs._reject_time][0] >= 0.\n assert epochs.times[epochs._reject_time][-1] <= 0.1\n\n # Invalid data for _is_good_epoch function\n epochs = Epochs(raw, events1, event_id, tmin, tmax)\n assert epochs._is_good_epoch(None) == (False, ('NO_DATA',))\n assert epochs._is_good_epoch(np.zeros((1, 1))) == (False, ('TOO_SHORT',))\n data = epochs[0].get_data()[0]\n assert epochs._is_good_epoch(data) == (True, None)\n\n # Check that reject_tmin and reject_tmax are being adjusted for small time\n # inaccuracies due to sfreq\n epochs = Epochs(raw=raw, events=events1, event_id=event_id,\n tmin=tmin, tmax=tmax, reject_tmin=tmin, reject_tmax=tmax)\n assert epochs.tmin != tmin\n assert epochs.tmax != tmax\n assert np.isclose(epochs.tmin, epochs.reject_tmin)\n assert np.isclose(epochs.tmax, epochs.reject_tmax)\n epochs.save(temp_fname, overwrite=True)\n read_epochs(temp_fname)\n\n # Ensure repeated rejection works, even if applied to only a subset of the\n # previously-used channel types\n epochs = Epochs(raw, events1, event_id, tmin, tmax,\n reject=reject, flat=flat)\n\n new_reject = reject.copy()\n new_flat = flat.copy()\n del new_reject['grad'], new_reject['eeg'], new_reject['eog']\n del new_flat['mag']\n\n # No changes expected\n epochs_cleaned = epochs.copy().drop_bad(reject=new_reject, flat=new_flat)\n assert epochs_cleaned.reject == epochs.reject\n assert epochs_cleaned.flat == epochs.flat\n\n new_reject['mag'] /= 2\n new_flat['grad'] *= 2\n # Only the newly-provided thresholds should be updated, the existing ones\n # should be kept\n epochs_cleaned = epochs.copy().drop_bad(reject=new_reject, flat=new_flat)\n assert epochs_cleaned.reject == dict(mag=new_reject['mag'],\n grad=reject['grad'],\n eeg=reject['eeg'],\n eog=reject['eog'])\n assert epochs_cleaned.flat == dict(grad=new_flat['grad'],\n mag=flat['mag'])\n\n\ndef test_preload_epochs():\n \"\"\"Test preload of epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs_preload = Epochs(raw, events[:16], event_id, 
tmin, tmax,\n picks=picks, preload=True,\n reject=reject, flat=flat)\n data_preload = epochs_preload.get_data()\n\n epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,\n preload=False, reject=reject, flat=flat)\n data = epochs.get_data()\n assert_array_equal(data_preload, data)\n assert_array_almost_equal(epochs_preload.average().data,\n epochs.average().data, 18)\n\n\ndef test_indexing_slicing():\n \"\"\"Test of indexing and slicing operations.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,\n reject=reject, flat=flat)\n\n data_normal = epochs.get_data()\n\n n_good_events = data_normal.shape[0]\n\n # indices for slicing\n start_index = 1\n end_index = n_good_events - 1\n\n assert((end_index - start_index) > 0)\n\n for preload in [True, False]:\n epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,\n preload=preload, reject=reject, flat=flat)\n\n if not preload:\n epochs2.drop_bad()\n\n # using slicing\n epochs2_sliced = epochs2[start_index:end_index]\n\n data_epochs2_sliced = epochs2_sliced.get_data()\n assert_array_equal(data_epochs2_sliced,\n data_normal[start_index:end_index])\n\n # using indexing\n pos = 0\n for idx in range(start_index, end_index):\n data = epochs2_sliced[pos].get_data()\n assert_array_equal(data[0], data_normal[idx])\n pos += 1\n\n # using indexing with an int\n data = epochs2[data_epochs2_sliced.shape[0]].get_data()\n assert_array_equal(data, data_normal[[idx]])\n\n # using indexing with an array\n idx = rng.randint(0, data_epochs2_sliced.shape[0], 10)\n data = epochs2[idx].get_data()\n assert_array_equal(data, data_normal[idx])\n\n # using indexing with a list of indices\n idx = [0]\n data = epochs2[idx].get_data()\n assert_array_equal(data, data_normal[idx])\n idx = [0, 1]\n data = epochs2[idx].get_data()\n assert_array_equal(data, data_normal[idx])\n\n\ndef test_comparision_with_c():\n \"\"\"Test of average obtained vs C code.\"\"\"\n raw, events = _get_data()[:2]\n c_evoked = read_evokeds(evoked_nf_name, condition=0)\n epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=None,\n preload=True, proj=False)\n evoked = epochs.set_eeg_reference(projection=True).apply_proj().average()\n sel = pick_channels(c_evoked.ch_names, evoked.ch_names)\n evoked_data = evoked.data\n c_evoked_data = c_evoked.data[sel]\n\n assert (evoked.nave == c_evoked.nave)\n assert_array_almost_equal(evoked_data, c_evoked_data, 10)\n assert_array_almost_equal(evoked.times, c_evoked.times, 12)\n\n\ndef test_crop(tmp_path):\n \"\"\"Test of crop of epochs.\"\"\"\n tempdir = str(tmp_path)\n temp_fname = op.join(tempdir, 'test-epo.fif')\n\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,\n preload=False, reject=reject, flat=flat)\n pytest.raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded\n data_normal = epochs.get_data()\n\n epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,\n picks=picks, preload=True, reject=reject, flat=flat)\n with pytest.warns(RuntimeWarning, match='tmax is set to'):\n epochs2.crop(-20, 200)\n\n # indices for slicing\n tmin_window = tmin + 0.1\n tmax_window = tmax - 0.1\n tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)\n assert (tmin_window > tmin)\n assert (tmax_window < tmax)\n\n epochs3 = epochs2.copy().crop(tmin_window, tmax_window)\n assert epochs3.baseline == epochs2.baseline\n data3 = epochs3.get_data()\n\n epochs2.crop(tmin_window, tmax_window)\n data2 = 
epochs2.get_data()\n\n assert_array_equal(data2, data_normal[:, :, tmask])\n assert_array_equal(data3, data_normal[:, :, tmask])\n assert_array_equal(epochs.time_as_index([tmin, tmax], use_rounding=True),\n [0, len(epochs.times) - 1])\n assert_array_equal(epochs3.time_as_index([tmin_window, tmax_window],\n use_rounding=True),\n [0, len(epochs3.times) - 1])\n\n # test time info is correct\n epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),\n np.ones((1, 3), int), tmin=-0.2)\n epochs.crop(-.200, .700)\n last_time = epochs.times[-1]\n with pytest.warns(RuntimeWarning, match='aliasing'):\n epochs.decimate(10)\n assert_allclose(last_time, epochs.times[-1])\n want_time = epochs.times[-1] - 1. / epochs.info['sfreq']\n epochs.crop(None, epochs.times[-1], include_tmax=False)\n assert_allclose(epochs.times[-1], want_time)\n\n epochs = Epochs(raw, events[:5], event_id, -1, 1,\n picks=picks, preload=True, reject=reject, flat=flat)\n # We include nearest sample, so actually a bit beyond our bounds here\n assert_allclose(epochs.tmin, -1.0006410259015925, rtol=1e-12)\n assert_allclose(epochs.tmax, 1.0006410259015925, rtol=1e-12)\n epochs_crop = epochs.copy().crop(-1, 1)\n assert_allclose(epochs.times, epochs_crop.times, rtol=1e-12)\n # Ensure we don't allow silly crops\n with pytest.warns(RuntimeWarning, match='is set to'):\n pytest.raises(ValueError, epochs.crop, 1000, 2000)\n pytest.raises(ValueError, epochs.crop, 0.1, 0)\n\n # Test that cropping adjusts reject_tmin and reject_tmax if need be.\n epochs = Epochs(raw=raw, events=events[:5], event_id=event_id,\n tmin=tmin, tmax=tmax, reject_tmin=tmin, reject_tmax=tmax)\n epochs.load_data()\n epochs_cropped = epochs.copy().crop(0, None)\n assert np.isclose(epochs_cropped.tmin, epochs_cropped.reject_tmin)\n\n epochs_cropped = epochs.copy().crop(None, 0.1)\n assert np.isclose(epochs_cropped.tmax, epochs_cropped.reject_tmax)\n del epochs_cropped\n\n # Test that repeated cropping is idempotent\n epoch_crop = epochs.copy()\n epoch_crop.crop(None, 0.4, include_tmax=False)\n n_times = len(epoch_crop.times)\n with pytest.warns(RuntimeWarning, match='tmax is set to'):\n epoch_crop.crop(None, 0.4, include_tmax=False)\n assert len(epoch_crop.times) == n_times\n\n # Cropping & I/O roundtrip\n epochs.crop(0, 0.1)\n epochs.save(temp_fname)\n epochs_read = mne.read_epochs(temp_fname)\n assert np.isclose(epochs_read.tmin, epochs_read.reject_tmin)\n assert np.isclose(epochs_read.tmax, epochs_read.reject_tmax)\n\n\ndef test_resample():\n \"\"\"Test of resample of epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,\n preload=False, reject=reject, flat=flat)\n pytest.raises(RuntimeError, epochs.resample, 100)\n\n epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,\n preload=True, reject=reject, flat=flat)\n epochs = epochs_o.copy()\n\n data_normal = deepcopy(epochs.get_data())\n times_normal = deepcopy(epochs.times)\n sfreq_normal = epochs.info['sfreq']\n # upsample by 2\n epochs = epochs_o.copy()\n epochs.resample(sfreq_normal * 2, npad=0)\n data_up = deepcopy(epochs.get_data())\n times_up = deepcopy(epochs.times)\n sfreq_up = epochs.info['sfreq']\n # downsamply by 2, which should match\n epochs.resample(sfreq_normal, npad=0)\n data_new = deepcopy(epochs.get_data())\n times_new = deepcopy(epochs.times)\n sfreq_new = epochs.info['sfreq']\n assert (data_up.shape[2] == 2 * data_normal.shape[2])\n assert (sfreq_up == 2 * sfreq_normal)\n assert (sfreq_new == 
sfreq_normal)\n assert (len(times_up) == 2 * len(times_normal))\n assert_array_almost_equal(times_new, times_normal, 10)\n assert (data_up.shape[2] == 2 * data_normal.shape[2])\n assert_array_almost_equal(data_new, data_normal, 5)\n\n # use parallel\n epochs = epochs_o.copy()\n epochs.resample(sfreq_normal * 2, n_jobs=1, npad=0)\n assert (np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))\n\n # test copy flag\n epochs = epochs_o.copy()\n epochs_resampled = epochs.copy().resample(sfreq_normal * 2, npad=0)\n assert (epochs_resampled is not epochs)\n epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0)\n assert (epochs_resampled is epochs)\n\n # test proper setting of times (#2645)\n n_trial, n_chan, n_time, sfreq = 1, 1, 10, 1000.\n data = np.zeros((n_trial, n_chan, n_time))\n events = np.zeros((n_trial, 3), int)\n info = create_info(n_chan, sfreq, 'eeg')\n epochs1 = EpochsArray(data, deepcopy(info), events)\n epochs2 = EpochsArray(data, deepcopy(info), events)\n epochs = concatenate_epochs([epochs1, epochs2])\n epochs1.resample(epochs1.info['sfreq'] // 2, npad='auto')\n epochs2.resample(epochs2.info['sfreq'] // 2, npad='auto')\n epochs = concatenate_epochs([epochs1, epochs2])\n for e in epochs1, epochs2, epochs:\n assert_equal(e.times[0], epochs.tmin)\n assert_equal(e.times[-1], epochs.tmax)\n # test that cropping after resampling works (#3296)\n this_tmin = -0.002\n epochs = EpochsArray(data, deepcopy(info), events, tmin=this_tmin)\n for times in (epochs.times, epochs._raw_times):\n assert_allclose(times, np.arange(n_time) / sfreq + this_tmin)\n epochs.resample(info['sfreq'] * 2.)\n for times in (epochs.times, epochs._raw_times):\n assert_allclose(times, np.arange(2 * n_time) / (sfreq * 2) + this_tmin)\n epochs.crop(0, None)\n for times in (epochs.times, epochs._raw_times):\n assert_allclose(times, np.arange((n_time - 2) * 2) / (sfreq * 2))\n epochs.resample(sfreq)\n for times in (epochs.times, epochs._raw_times):\n assert_allclose(times, np.arange(n_time - 2) / sfreq)\n\n\ndef test_detrend():\n \"\"\"Test detrending of epochs.\"\"\"\n raw, events, picks = _get_data()\n\n # test first-order\n epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n baseline=None, detrend=1)\n epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n baseline=None, detrend=None)\n data_picks = pick_types(epochs_1.info, meg=True, eeg=True,\n exclude='bads')\n evoked_1 = epochs_1.average()\n evoked_2 = epochs_2.average()\n evoked_2.detrend(1)\n # Due to roundoff these won't be exactly equal, but they should be close\n assert_allclose(evoked_1.data, evoked_2.data, rtol=1e-8, atol=1e-20)\n\n # test zeroth-order case\n for preload in [True, False]:\n epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n baseline=(None, None), preload=preload)\n epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n baseline=None, preload=preload, detrend=0)\n a = epochs_1.get_data()\n b = epochs_2.get_data()\n # All data channels should be almost equal\n assert_allclose(a[:, data_picks, :], b[:, data_picks, :],\n rtol=1e-16, atol=1e-20)\n # There are non-M/EEG channels that should not be equal:\n assert not np.allclose(a, b)\n\n for value in ['foo', 2, False, True]:\n pytest.raises(ValueError, Epochs, raw, events[:4], event_id,\n tmin, tmax, detrend=value)\n\n\ndef test_bootstrap():\n \"\"\"Test of bootstrapping of epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,\n preload=True, 
reject=reject, flat=flat)\n random_states = [0]\n if check_version('numpy', '1.17'):\n random_states += [np.random.default_rng(0)]\n for random_state in random_states:\n epochs2 = bootstrap(epochs, random_state=random_state)\n assert (len(epochs2.events) == len(epochs.events))\n assert (epochs._data.shape == epochs2._data.shape)\n\n\ndef test_epochs_copy():\n \"\"\"Test copy epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,\n preload=True, reject=reject, flat=flat)\n copied = epochs.copy()\n assert_array_equal(epochs._data, copied._data)\n\n epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,\n preload=False, reject=reject, flat=flat)\n copied = epochs.copy()\n data = epochs.get_data()\n copied_data = copied.get_data()\n assert_array_equal(data, copied_data)\n\n\ndef test_iter_evoked():\n \"\"\"Test the iterator for epochs -> evoked.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks)\n\n for ii, ev in enumerate(epochs.iter_evoked()):\n x = ev.data\n y = epochs.get_data()[ii, :, :]\n assert_array_equal(x, y)\n\n\[email protected]('preload', (True, False))\ndef test_iter_epochs(preload):\n \"\"\"Test iteration over epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(\n raw, events[:5], event_id, tmin, tmax, picks=picks, preload=preload)\n assert not hasattr(epochs, '_current_detrend_picks')\n epochs_data = epochs.get_data()\n data = list()\n for _ in range(10):\n try:\n data.append(next(epochs))\n except StopIteration:\n break\n else:\n assert hasattr(epochs, '_current_detrend_picks')\n assert not hasattr(epochs, '_current_detrend_picks')\n data = np.array(data)\n assert_allclose(data, epochs_data, atol=1e-20)\n\n\ndef test_subtract_evoked():\n \"\"\"Test subtraction of Evoked from Epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)\n\n # make sure subtraction fails if data channels are missing\n pytest.raises(ValueError, epochs.subtract_evoked,\n epochs.average(picks[:5]))\n\n # do the subtraction using the default argument\n epochs.subtract_evoked()\n\n # apply SSP now\n epochs.apply_proj()\n\n # use preloading and SSP from the start\n epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,\n preload=True)\n\n evoked = epochs2.average()\n epochs2.subtract_evoked(evoked)\n\n # this gives the same result\n assert_allclose(epochs.get_data(), epochs2.get_data())\n\n # if we compute the evoked response after subtracting it we get zero\n zero_evoked = epochs.average()\n data = zero_evoked.data\n assert_allclose(data, np.zeros_like(data), atol=1e-15)\n\n # with decimation (gh-7854)\n epochs3 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,\n decim=10, verbose='error')\n data_old = epochs2.decimate(10, verbose='error').get_data()\n data = epochs3.subtract_evoked().get_data()\n assert_allclose(data, data_old)\n assert_allclose(epochs3.average().data, 0., atol=1e-20)\n\n\ndef test_epoch_eq():\n \"\"\"Test epoch count equalization and condition combining.\"\"\"\n raw, events, picks = _get_data()\n # equalizing epochs objects\n events_1 = events[events[:, 2] == event_id]\n epochs_1 = Epochs(raw, events_1, event_id, tmin, tmax, picks=picks)\n events_2 = events[events[:, 2] == event_id_2]\n epochs_2 = Epochs(raw, events_2, event_id_2, tmin, tmax, picks=picks)\n epochs_1.drop_bad() # make sure drops are logged\n assert_equal(len([log for log in 
epochs_1.drop_log if not log]),\n len(epochs_1.events))\n assert epochs_1.drop_log == ((),) * len(epochs_1.events)\n assert_equal(len([lg for lg in epochs_1.drop_log if not lg]),\n len(epochs_1.events))\n assert (epochs_1.events.shape[0] != epochs_2.events.shape[0])\n equalize_epoch_counts([epochs_1, epochs_2], method='mintime')\n assert_equal(epochs_1.events.shape[0], epochs_2.events.shape[0])\n epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)\n epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)\n equalize_epoch_counts([epochs_3, epochs_4], method='truncate')\n assert_equal(epochs_1.events.shape[0], epochs_3.events.shape[0])\n assert_equal(epochs_3.events.shape[0], epochs_4.events.shape[0])\n\n # equalizing conditions\n epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},\n tmin, tmax, picks=picks, reject=reject)\n epochs.drop_bad() # make sure drops are logged\n assert_equal(len([log for log in epochs.drop_log if not log]),\n len(epochs.events))\n drop_log1 = deepcopy(epochs.drop_log)\n old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]\n epochs.equalize_event_counts(['a', 'b'])\n # undo the eq logging\n drop_log2 = tuple(() if log == ('EQUALIZED_COUNT',) else log\n for log in epochs.drop_log)\n assert_equal(drop_log1, drop_log2)\n\n assert_equal(len([log for log in epochs.drop_log if not log]),\n len(epochs.events))\n new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]\n assert_equal(new_shapes[0], new_shapes[1])\n assert_equal(new_shapes[2], new_shapes[2])\n assert_equal(new_shapes[3], new_shapes[3])\n # now with two conditions collapsed\n old_shapes = new_shapes\n epochs.equalize_event_counts([['a', 'b'], 'c'])\n new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]\n assert_equal(new_shapes[0] + new_shapes[1], new_shapes[2])\n assert_equal(new_shapes[3], old_shapes[3])\n with pytest.raises(KeyError, match='keys must be strings, got'):\n epochs.equalize_event_counts([1, 'a'])\n\n # now let's combine conditions\n old_shapes = new_shapes\n epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])\n new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]\n assert_equal(old_shapes[0] + old_shapes[1], new_shapes[0] + new_shapes[1])\n assert_equal(new_shapes[0] + new_shapes[1], new_shapes[2] + new_shapes[3])\n with pytest.raises(ValueError, match='value must not already exist'):\n combine_event_ids(epochs, ['a', 'b'], {'ab': 1})\n\n combine_event_ids(epochs, ['a', 'b'], {'ab': np.int32(12)}, copy=False)\n caught = 0\n for key in ['a', 'b']:\n try:\n epochs[key]\n except KeyError:\n caught += 1\n assert_equal(caught, 2)\n assert (not np.any(epochs.events[:, 2] == 1))\n assert (not np.any(epochs.events[:, 2] == 2))\n epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})\n assert np.all(np.logical_or(epochs.events[:, 2] == 12,\n epochs.events[:, 2] == 34))\n assert_equal(epochs['ab'].events.shape[0], old_shapes[0] + old_shapes[1])\n assert_equal(epochs['ab'].events.shape[0], epochs['cd'].events.shape[0])\n\n # equalizing with hierarchical tags\n epochs = Epochs(raw, events, {'a/x': 1, 'b/x': 2, 'a/y': 3, 'b/y': 4},\n tmin, tmax, picks=picks, reject=reject)\n cond1, cond2 = ['a', ['b/x', 'b/y']], [['a/x', 'a/y'], 'b']\n es = [epochs.copy().equalize_event_counts(c)[0]\n for c in (cond1, cond2)]\n assert_array_equal(es[0].events[:, 0], es[1].events[:, 0])\n with pytest.raises(ValueError, match='mix hierarchical and regular'):\n epochs.equalize_event_counts(['a', 
['b', 'b/y']])\n with pytest.raises(ValueError, match='overlapping. Provide an orthogonal'):\n epochs.equalize_event_counts([['a/x', 'a/y'], 'x'])\n with pytest.raises(KeyError, match='not found in the epoch object'):\n epochs.equalize_event_counts([\"a/no_match\", \"b\"])\n # test equalization with only one epoch in each cond\n epo = epochs[[0, 1, 5]]\n assert len(epo['x']) == 2\n assert len(epo['y']) == 1\n epo_, drop_inds = epo.equalize_event_counts()\n assert len(epo_) == 2\n assert drop_inds.shape == (1,)\n # test equalization with no events of one type\n epochs.drop(np.arange(10))\n assert_equal(len(epochs['a/x']), 0)\n assert (len(epochs['a/y']) > 0)\n epochs.equalize_event_counts(['a/x', 'a/y'])\n assert_equal(len(epochs['a/x']), 0)\n assert_equal(len(epochs['a/y']), 0)\n\n # test default behavior (event_ids=None)\n epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},\n tmin, tmax, picks=picks, reject=reject)\n epochs_1, _ = epochs.copy().equalize_event_counts()\n epochs_2, _ = epochs.copy().equalize_event_counts(list(epochs.event_id))\n assert_array_equal(epochs_1.events, epochs_2.events)\n\n # test invalid values of event_ids\n with pytest.raises(TypeError, match='received a string'):\n epochs.equalize_event_counts('hello!')\n\n with pytest.raises(TypeError, match='list-like or None'):\n epochs.equalize_event_counts(1.5)\n\n\ndef test_access_by_name(tmp_path):\n \"\"\"Test accessing epochs by event name and on_missing for rare events.\"\"\"\n tempdir = str(tmp_path)\n raw, events, picks = _get_data()\n\n # Test various invalid inputs\n pytest.raises(TypeError, Epochs, raw, events, {1: 42, 2: 42}, tmin,\n tmax, picks=picks)\n pytest.raises(TypeError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},\n tmin, tmax, picks=picks)\n pytest.raises(TypeError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},\n tmin, tmax, picks=picks)\n pytest.raises(TypeError, Epochs, raw, events, 'foo', tmin, tmax,\n picks=picks)\n pytest.raises(TypeError, Epochs, raw, events, ['foo'], tmin, tmax,\n picks=picks)\n\n # Test accessing non-existent events (assumes 12345678 does not exist)\n event_id_illegal = dict(aud_l=1, does_not_exist=12345678)\n pytest.raises(ValueError, Epochs, raw, events, event_id_illegal,\n tmin, tmax)\n # Test on_missing\n pytest.raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,\n tmax, on_missing='foo')\n with pytest.warns(RuntimeWarning, match='No matching events'):\n Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warn')\n Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')\n\n # Test constructing epochs with a list of ints as events\n epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)\n for k, v in epochs.event_id.items():\n assert_equal(int(k), v)\n\n epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)\n pytest.raises(KeyError, epochs.__getitem__, 'bar')\n\n data = epochs['a'].get_data()\n event_a = events[events[:, 2] == 1]\n assert (len(data) == len(event_a))\n\n epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,\n preload=True)\n pytest.raises(KeyError, epochs.__getitem__, 'bar')\n temp_fname = op.join(tempdir, 'test-epo.fif')\n epochs.save(temp_fname, overwrite=True)\n epochs2 = read_epochs(temp_fname)\n\n for ep in [epochs, epochs2]:\n data = ep['a'].get_data()\n event_a = events[events[:, 2] == 1]\n assert (len(data) == len(event_a))\n\n assert_array_equal(epochs2['a'].events, epochs['a'].events)\n\n epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},\n 
tmin, tmax, picks=picks, preload=True)\n assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),\n [1, 2])\n epochs4 = epochs['a']\n epochs5 = epochs3['a']\n assert_array_equal(epochs4.events, epochs5.events)\n # 20 is our tolerance because epochs are written out as floats\n assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)\n epochs6 = epochs3[['a', 'b']]\n assert all(np.logical_or(epochs6.events[:, 2] == 1,\n epochs6.events[:, 2] == 2))\n assert_array_equal(epochs.events, epochs6.events)\n assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)\n\n # Make sure we preserve names\n assert_equal(epochs['a']._name, 'a')\n assert_equal(epochs[['a', 'b']]['a']._name, 'a')\n\n\[email protected]\n@requires_pandas\ndef test_to_data_frame():\n \"\"\"Test epochs Pandas exporter.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)\n # test index checking\n with pytest.raises(ValueError, match='options. Valid index options are'):\n epochs.to_data_frame(index=['foo', 'bar'])\n with pytest.raises(ValueError, match='\"qux\" is not a valid option'):\n epochs.to_data_frame(index='qux')\n with pytest.raises(TypeError, match='index must be `None` or a string or'):\n epochs.to_data_frame(index=np.arange(400))\n # test wide format\n df_wide = epochs.to_data_frame()\n assert all(np.in1d(epochs.ch_names, df_wide.columns))\n assert all(np.in1d(['time', 'epoch', 'condition'], df_wide.columns))\n # test long format\n df_long = epochs.to_data_frame(long_format=True)\n expected = ('condition', 'epoch', 'time', 'channel', 'ch_type', 'value')\n assert set(expected) == set(df_long.columns)\n assert set(epochs.ch_names) == set(df_long['channel'])\n assert(len(df_long) == epochs.get_data().size)\n # test long format w/ index\n df_long = epochs.to_data_frame(long_format=True, index=['epoch'])\n del df_wide, df_long\n # test scalings\n df = epochs.to_data_frame(index=['condition', 'epoch', 'time'])\n data = np.hstack(epochs.get_data())\n assert_array_equal(df.values[:, 0], data[0] * 1e13)\n assert_array_equal(df.values[:, 2], data[2] * 1e15)\n\n\n@requires_pandas\[email protected]('index', ('time', ['condition', 'time', 'epoch'],\n ['epoch', 'time'], ['time', 'epoch'], None))\ndef test_to_data_frame_index(index):\n \"\"\"Test index creation in epochs Pandas exporter.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)\n df = epochs.to_data_frame(picks=[11, 12, 14], index=index)\n # test index order/hierarchy preservation\n if not isinstance(index, list):\n index = [index]\n assert (df.index.names == index)\n # test that non-indexed data were present as columns\n non_index = list(set(['condition', 'time', 'epoch']) - set(index))\n if len(non_index):\n assert all(np.in1d(non_index, df.columns))\n\n\n@requires_pandas\[email protected]('time_format', (None, 'ms', 'timedelta'))\ndef test_to_data_frame_time_format(time_format):\n \"\"\"Test time conversion in epochs Pandas exporter.\"\"\"\n from pandas import Timedelta\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)\n # test time_format\n df = epochs.to_data_frame(time_format=time_format)\n dtypes = {None: np.float64, 'ms': np.int64, 'timedelta': Timedelta}\n assert isinstance(df['time'].iloc[0], dtypes[time_format])\n\n\ndef test_epochs_proj_mixin():\n \"\"\"Test SSP proj methods from ProjMixin class.\"\"\"\n raw, events, picks = _get_data()\n for proj 
in [True, False]:\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n proj=proj)\n\n assert (all(p['active'] == proj for p in epochs.info['projs']))\n\n # test adding / deleting proj\n if proj:\n epochs.get_data()\n assert (all(p['active'] == proj for p in epochs.info['projs']))\n pytest.raises(ValueError, epochs.add_proj, epochs.info['projs'][0],\n {'remove_existing': True})\n pytest.raises(ValueError, epochs.add_proj, 'spam')\n pytest.raises(ValueError, epochs.del_proj, 0)\n else:\n projs = deepcopy(epochs.info['projs'])\n n_proj = len(epochs.info['projs'])\n epochs.del_proj(0)\n assert (len(epochs.info['projs']) == n_proj - 1)\n # Test that already existing projections are not added.\n epochs.add_proj(projs, remove_existing=False)\n assert (len(epochs.info['projs']) == n_proj)\n epochs.add_proj(projs[:-1], remove_existing=True)\n assert (len(epochs.info['projs']) == n_proj - 1)\n\n # catch no-gos.\n # wrong proj argument\n pytest.raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,\n picks=picks, proj='crazy')\n\n for preload in [True, False]:\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n proj='delayed', preload=preload,\n reject=reject).set_eeg_reference(projection=True)\n epochs_proj = Epochs(\n raw, events[:4], event_id, tmin, tmax, picks=picks,\n proj=True, preload=preload,\n reject=reject).set_eeg_reference(projection=True).apply_proj()\n\n epochs_noproj = Epochs(raw, events[:4], event_id, tmin, tmax,\n picks=picks, proj=False, preload=preload,\n reject=reject)\n epochs_noproj.set_eeg_reference(projection=True)\n\n assert_allclose(epochs.copy().apply_proj().get_data(),\n epochs_proj.get_data(), rtol=1e-10, atol=1e-25)\n assert_allclose(epochs.get_data(),\n epochs_noproj.get_data(), rtol=1e-10, atol=1e-25)\n\n # make sure data output is constant across repeated calls\n # e.g. drop bads\n assert_array_equal(epochs.get_data(), epochs.get_data())\n assert_array_equal(epochs_proj.get_data(), epochs_proj.get_data())\n assert_array_equal(epochs_noproj.get_data(), epochs_noproj.get_data())\n\n # test epochs.next calls\n data = epochs.get_data().copy()\n data2 = np.array([e for e in epochs])\n assert_array_equal(data, data2)\n\n # cross application from processing stream 1 to 2\n epochs.apply_proj()\n assert_array_equal(epochs._projector, epochs_proj._projector)\n assert_allclose(epochs._data, epochs_proj.get_data())\n\n # test mixin against manual application\n epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,\n baseline=None,\n proj=False).set_eeg_reference(projection=True)\n data = epochs.get_data().copy()\n epochs.apply_proj()\n assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])\n\n\ndef test_delayed_epochs():\n \"\"\"Test delayed projection on Epochs.\"\"\"\n raw, events, picks = _get_data()\n events = events[:10]\n picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],\n pick_types(raw.info, meg=False, eeg=False,\n ecg=True, eog=True)])\n picks = np.sort(picks)\n raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])\n raw.info.normalize_proj()\n del picks\n n_epochs = 2 # number we expect after rejection\n with raw.info._unlock():\n raw.info['lowpass'] = 40. 
# fake the LP info so no warnings\n for decim in (1, 3):\n proj_data = Epochs(raw, events, event_id, tmin, tmax, proj=True,\n reject=reject, decim=decim)\n use_tmin = proj_data.tmin\n proj_data = proj_data.get_data()\n noproj_data = Epochs(raw, events, event_id, tmin, tmax, proj=False,\n reject=reject, decim=decim).get_data()\n assert_equal(proj_data.shape, noproj_data.shape)\n assert_equal(proj_data.shape[0], n_epochs)\n for preload in (True, False):\n for proj in (True, False, 'delayed'):\n for ii in range(3):\n print(decim, preload, proj, ii)\n comp = proj_data if proj is True else noproj_data\n if ii in (0, 1):\n epochs = Epochs(raw, events, event_id, tmin, tmax,\n proj=proj, reject=reject,\n preload=preload, decim=decim)\n else:\n fake_events = np.zeros((len(comp), 3), int)\n fake_events[:, 0] = np.arange(len(comp))\n fake_events[:, 2] = 1\n epochs = EpochsArray(comp, raw.info, tmin=use_tmin,\n event_id=1, events=fake_events,\n proj=proj)\n with epochs.info._unlock():\n epochs.info['sfreq'] /= decim\n assert_equal(len(epochs), n_epochs)\n assert (raw.proj is False)\n assert (epochs.proj is\n (True if proj is True else False))\n if ii == 1:\n epochs.load_data()\n picks_data = pick_types(epochs.info, meg=True, eeg=True)\n evoked = epochs.average(picks=picks_data)\n assert_equal(evoked.nave, n_epochs, str(epochs.drop_log))\n if proj is True:\n evoked.apply_proj()\n else:\n assert (evoked.proj is False)\n assert_array_equal(evoked.ch_names,\n np.array(epochs.ch_names)[picks_data])\n assert_allclose(evoked.times, epochs.times)\n epochs_data = epochs.get_data()\n assert_allclose(evoked.data,\n epochs_data.mean(axis=0)[picks_data],\n rtol=1e-5, atol=1e-20)\n assert_allclose(epochs_data, comp, rtol=1e-5, atol=1e-20)\n\n\ndef test_drop_epochs():\n \"\"\"Test dropping of epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)\n events1 = events[events[:, 2] == event_id]\n\n # Bound checks\n pytest.raises(IndexError, epochs.drop, [len(epochs.events)])\n pytest.raises(IndexError, epochs.drop, [-len(epochs.events) - 1])\n pytest.raises(ValueError, epochs.drop, [[1, 2], [3, 4]])\n\n # Test selection attribute\n assert_array_equal(epochs.selection,\n np.where(events[:, 2] == event_id)[0])\n assert_equal(len(epochs.drop_log), len(events))\n assert (all(epochs.drop_log[k] == ('IGNORED',)\n for k in set(range(len(events))) - set(epochs.selection)))\n\n selection = epochs.selection.copy()\n n_events = len(epochs.events)\n epochs.drop([2, 4], reason='d')\n assert_equal(epochs.drop_log_stats(), 2. 
/ n_events * 100)\n assert_equal(len(epochs.drop_log), len(events))\n assert_equal([epochs.drop_log[k]\n for k in selection[[2, 4]]], [['d'], ['d']])\n assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])\n assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])\n assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])\n\n\[email protected]('preload', (True, False))\ndef test_drop_epochs_mult(preload):\n \"\"\"Test that subselecting epochs or making fewer epochs is similar.\"\"\"\n raw, events, picks = _get_data()\n assert_array_equal(events[14], [33712, 0, 1]) # event type a\n epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},\n tmin, tmax, picks=picks, reject=reject,\n preload=preload)\n epochs2 = Epochs(raw, events, {'a': 1},\n tmin, tmax, picks=picks, reject=reject,\n preload=preload)\n epochs1 = epochs1['a']\n assert_array_equal(epochs1.events, epochs2.events)\n assert_array_equal(epochs1.selection, epochs2.selection)\n\n if preload:\n # In the preload case you cannot know the bads if already ignored\n assert len(epochs1.drop_log) == len(epochs2.drop_log)\n for di, (d1, d2) in enumerate(zip(epochs1.drop_log, epochs2.drop_log)):\n assert isinstance(d1, tuple)\n assert isinstance(d2, tuple)\n msg = (f'\\nepochs1.drop_log[{di}] = {d1}, '\n f'\\nepochs2.drop_log[{di}] = {d2}')\n if 'IGNORED' in d1:\n assert 'IGNORED' in d2, msg\n if 'IGNORED' not in d1 and d1 != ():\n assert ((d2 == d1) or (d2 == ('IGNORED',))), msg\n if d1 == ():\n assert (d2 == ()), msg\n else:\n # In the non preload is should be exactly the same\n assert epochs1.drop_log == epochs2.drop_log\n\n\ndef test_contains():\n \"\"\"Test membership API.\"\"\"\n raw, events = _get_data(True)[:2]\n # Add seeg channel\n seeg = RawArray(np.zeros((1, len(raw.times))),\n create_info(['SEEG 001'], raw.info['sfreq'], 'seeg'))\n with seeg.info._unlock():\n for key in ('dev_head_t', 'highpass', 'lowpass',\n 'dig', 'description', 'acq_pars', 'experimenter',\n 'proj_name'):\n seeg.info[key] = raw.info[key]\n raw.add_channels([seeg])\n # Add dbs channel\n dbs = RawArray(np.zeros((1, len(raw.times))),\n create_info(['DBS 001'], raw.info['sfreq'], 'dbs'))\n with dbs.info._unlock():\n for key in ('dev_head_t', 'highpass', 'lowpass',\n 'dig', 'description', 'acq_pars', 'experimenter',\n 'proj_name'):\n dbs.info[key] = raw.info[key]\n raw.add_channels([dbs])\n tests = [(('mag', False, False, False), ('grad', 'eeg', 'seeg', 'dbs')),\n (('grad', False, False, False), ('mag', 'eeg', 'seeg', 'dbs')),\n ((False, True, False, False), ('grad', 'mag', 'seeg', 'dbs')),\n ((False, False, True, False), ('grad', 'mag', 'eeg', 'dbs'))]\n\n for (meg, eeg, seeg, dbs), others in tests:\n picks_contains = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg,\n dbs=dbs)\n epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,\n picks=picks_contains)\n if eeg:\n test = 'eeg'\n elif seeg:\n test = 'seeg'\n elif dbs:\n test = 'dbs'\n else:\n test = meg\n assert (test in epochs)\n assert (not any(o in epochs for o in others))\n\n pytest.raises(ValueError, epochs.__contains__, 'foo')\n pytest.raises(TypeError, epochs.__contains__, 1)\n\n\ndef test_drop_channels_mixin():\n \"\"\"Test channels-dropping functionality.\"\"\"\n raw, events = _get_data()[:2]\n # here without picks to get additional coverage\n epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)\n drop_ch = epochs.ch_names[:3]\n ch_names = epochs.ch_names[3:]\n\n ch_names_orig = epochs.ch_names\n dummy = epochs.copy().drop_channels(drop_ch)\n 
assert_equal(ch_names, dummy.ch_names)\n assert_equal(ch_names_orig, epochs.ch_names)\n assert_equal(len(ch_names_orig), epochs.get_data().shape[1])\n\n epochs.drop_channels(drop_ch)\n assert_equal(ch_names, epochs.ch_names)\n assert_equal(len(ch_names), epochs.get_data().shape[1])\n\n\ndef test_pick_channels_mixin():\n \"\"\"Test channel-picking functionality.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n preload=True)\n ch_names = epochs.ch_names[:3]\n epochs.preload = False\n pytest.raises(RuntimeError, epochs.drop_channels, [ch_names[0]])\n epochs.preload = True\n ch_names_orig = epochs.ch_names\n dummy = epochs.copy().pick_channels(ch_names)\n assert_equal(ch_names, dummy.ch_names)\n assert_equal(ch_names_orig, epochs.ch_names)\n assert_equal(len(ch_names_orig), epochs.get_data().shape[1])\n\n epochs.pick_channels(ch_names)\n assert_equal(ch_names, epochs.ch_names)\n assert_equal(len(ch_names), epochs.get_data().shape[1])\n\n # Invalid picks\n pytest.raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,\n picks=[])\n\n\ndef test_equalize_channels():\n \"\"\"Test equalization of channels.\"\"\"\n raw, events, picks = _get_data()\n epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n proj=False, preload=True)\n epochs2 = epochs1.copy()\n ch_names = epochs1.ch_names[2:]\n epochs1.drop_channels(epochs1.ch_names[:1])\n epochs2.drop_channels(epochs2.ch_names[1:2])\n my_comparison = [epochs1, epochs2]\n my_comparison = equalize_channels(my_comparison)\n for e in my_comparison:\n assert_equal(ch_names, e.ch_names)\n\n\ndef test_illegal_event_id():\n \"\"\"Test handling of invalid events ids.\"\"\"\n raw, events, picks = _get_data()\n event_id_illegal = dict(aud_l=1, does_not_exist=12345678)\n\n pytest.raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,\n tmax, picks=picks, proj=False)\n\n\ndef test_add_channels_epochs():\n \"\"\"Test adding channels.\"\"\"\n raw, events, picks = _get_data()\n\n def make_epochs(picks, proj):\n return Epochs(raw, events, event_id, tmin, tmax, preload=True,\n proj=proj, picks=picks)\n\n picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')\n picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')\n picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')\n\n for proj in (False, True):\n epochs = make_epochs(picks=picks, proj=proj)\n epochs_meg = make_epochs(picks=picks_meg, proj=proj)\n assert not epochs_meg.times.flags['WRITEABLE']\n epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)\n epochs.info._check_consistency()\n epochs_meg.info._check_consistency()\n epochs_eeg.info._check_consistency()\n\n epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])\n\n assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))\n assert_equal(len(epochs.info.keys()), len(epochs_meg.info.keys()))\n assert_equal(len(epochs.info.keys()), len(epochs_eeg.info.keys()))\n assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))\n\n data1 = epochs.get_data()\n data2 = epochs2.get_data()\n data3 = np.concatenate([e.get_data() for e in\n [epochs_meg, epochs_eeg]], axis=1)\n assert_array_equal(data1.shape, data2.shape)\n assert_allclose(data1, data3, atol=1e-25)\n assert_allclose(data1, data2, atol=1e-25)\n\n assert not epochs_meg.times.flags['WRITEABLE']\n epochs_meg2 = epochs_meg.copy()\n assert not epochs_meg.times.flags['WRITEABLE']\n assert not epochs_meg2.times.flags['WRITEABLE']\n epochs_meg2.set_meas_date(0)\n 
add_channels_epochs([epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2.events[3, 2] -= 1\n pytest.raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n pytest.raises(ValueError, add_channels_epochs,\n [epochs_meg, epochs_eeg[:2]])\n\n epochs_meg.info['chs'].pop(0)\n epochs_meg.info._update_redundant()\n pytest.raises(RuntimeError, add_channels_epochs, [epochs_meg, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n with epochs_meg2.info._unlock():\n epochs_meg2.info['sfreq'] = None\n pytest.raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n with epochs_meg2.info._unlock():\n epochs_meg2.info['sfreq'] += 10\n pytest.raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n with epochs_meg2.info._unlock():\n epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][0]\n epochs_meg2.info._update_redundant()\n with pytest.warns(RuntimeWarning, match='not unique'):\n pytest.raises(RuntimeError, add_channels_epochs,\n [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n with epochs_meg2.info._unlock():\n epochs_meg2.info['dev_head_t']['to'] += 1\n pytest.raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n with epochs_meg2.info._unlock():\n epochs_meg2.info['dev_head_t']['to'] += 1\n pytest.raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2.info['experimenter'] = 'foo'\n pytest.raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2.preload = False\n pytest.raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2._set_times(epochs_meg.times + 0.4)\n pytest.raises(NotImplementedError, add_channels_epochs,\n [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2._set_times(epochs_meg2.times + 0.5)\n assert not epochs_meg2.times.flags['WRITEABLE']\n pytest.raises(NotImplementedError, add_channels_epochs,\n [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2.baseline = None\n pytest.raises(NotImplementedError, add_channels_epochs,\n [epochs_meg2, epochs_eeg])\n\n epochs_meg2 = epochs_meg.copy()\n epochs_meg2.event_id['b'] = 2\n pytest.raises(NotImplementedError, add_channels_epochs,\n [epochs_meg2, epochs_eeg])\n\n # use delayed projection, add channel, ensure projectors match\n epochs_meg2 = make_epochs(picks=picks_meg, proj='delayed')\n assert len(epochs_meg2.info['projs']) == 3\n meg2_proj = epochs_meg2._projector\n assert meg2_proj is not None\n epochs_eeg = make_epochs(picks=picks_eeg, proj='delayed')\n epochs_meg2.add_channels([epochs_eeg])\n del epochs_eeg\n assert len(epochs_meg2.info['projs']) == 3\n new_proj = epochs_meg2._projector\n n_meg, n_eeg = len(picks_meg), len(picks_eeg)\n n_tot = n_meg + n_eeg\n assert new_proj.shape == (n_tot,) * 2\n assert_allclose(new_proj[:n_meg, :n_meg], meg2_proj, atol=1e-12)\n assert_allclose(new_proj[n_meg:, n_meg:], np.eye(n_eeg), atol=1e-12)\n\n\ndef test_array_epochs(tmp_path):\n \"\"\"Test creating epochs from array.\"\"\"\n tempdir = str(tmp_path)\n\n # creating\n data = rng.random_sample((10, 20, 300))\n sfreq = 1e3\n ch_names = ['EEG %03d' % (i + 1) for i in range(20)]\n types = ['eeg'] * 20\n info = 
create_info(ch_names, sfreq, types)\n events = np.c_[np.arange(1, 600, 60),\n np.zeros(10, int),\n [1, 2] * 5]\n epochs = EpochsArray(data, info, events, tmin)\n assert epochs.event_id == {'1': 1, '2': 2}\n assert (str(epochs).startswith('<EpochsArray'))\n # From GH#1963\n with pytest.raises(ValueError, match='number of events must match'):\n EpochsArray(data[:-1], info, events, tmin)\n pytest.raises(ValueError, EpochsArray, data, info, events, tmin,\n dict(a=1))\n pytest.raises(ValueError, EpochsArray, data, info, events, tmin,\n selection=[1])\n # should be fine\n EpochsArray(data, info, events, tmin, selection=np.arange(len(events)) + 5)\n\n # saving\n temp_fname = op.join(tempdir, 'test-epo.fif')\n epochs.save(temp_fname, overwrite=True)\n epochs2 = read_epochs(temp_fname)\n data2 = epochs2.get_data()\n assert_allclose(data, data2)\n assert_allclose(epochs.times, epochs2.times)\n assert_equal(epochs.event_id, epochs2.event_id)\n assert_array_equal(epochs.events, epochs2.events)\n\n # plotting\n epochs[0].plot()\n plt.close('all')\n\n # indexing\n assert_array_equal(np.unique(epochs['1'].events[:, 2]), np.array([1]))\n assert_equal(len(epochs[:2]), 2)\n data[0, 5, 150] = 3000\n data[1, :, :] = 0\n data[2, 5, 210] = 3000\n data[3, 5, 260] = 0\n epochs = EpochsArray(data, info, events=events,\n tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),\n reject_tmin=0.1, reject_tmax=0.2)\n assert_equal(len(epochs), len(events) - 2)\n assert_equal(epochs.drop_log[0], ['EEG 006'])\n assert_equal(len(epochs.drop_log), 10)\n assert_equal(len(epochs.events), len(epochs.selection))\n\n # baseline\n data = np.ones((10, 20, 300))\n epochs = EpochsArray(data, info, events, tmin=-.2, baseline=(None, 0))\n ep_data = epochs.get_data()\n assert_array_equal(ep_data, np.zeros_like(ep_data))\n\n # one time point\n epochs = EpochsArray(data[:, :, :1], info, events=events, tmin=0.)\n assert_allclose(epochs.times, [0.])\n assert_allclose(epochs.get_data(), data[:, :, :1])\n epochs.save(temp_fname, overwrite=True)\n epochs_read = read_epochs(temp_fname)\n assert_allclose(epochs_read.times, [0.])\n assert_allclose(epochs_read.get_data(), data[:, :, :1])\n\n # event as integer (#2435)\n mask = (events[:, 2] == 1)\n data_1 = data[mask]\n events_1 = events[mask]\n epochs = EpochsArray(data_1, info, events=events_1, event_id=1, tmin=-0.2)\n\n # default events\n epochs = EpochsArray(data_1, info)\n assert_array_equal(epochs.events[:, 0], np.arange(len(data_1)))\n assert_array_equal(epochs.events[:, 1], np.zeros(len(data_1), int))\n assert_array_equal(epochs.events[:, 2], np.ones(len(data_1), int))\n\n\ndef test_concatenate_epochs():\n \"\"\"Test concatenate epochs.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw=raw, events=events, event_id=event_id, tmin=tmin,\n tmax=tmax, picks=picks)\n epochs2 = epochs.copy()\n epochs_list = [epochs, epochs2]\n epochs_conc = concatenate_epochs(epochs_list)\n assert_array_equal(\n epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))\n\n expected_shape = list(epochs.get_data().shape)\n expected_shape[0] *= 2\n expected_shape = tuple(expected_shape)\n\n assert_equal(epochs_conc.get_data().shape, expected_shape)\n assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)\n\n epochs2 = epochs.copy().load_data()\n with pytest.raises(ValueError, match=r\"epochs\\[1\\].info\\['nchan'\\] must\"):\n concatenate_epochs(\n [epochs, epochs2.copy().drop_channels(epochs2.ch_names[:1])])\n\n epochs2._set_times(np.delete(epochs2.times, 1))\n with pytest.raises(ValueError, 
match='could not be broadcast'):\n concatenate_epochs([epochs, epochs2])\n\n assert_equal(epochs_conc._raw, None)\n\n # check if baseline is same for all epochs\n epochs2 = epochs.copy()\n epochs2.apply_baseline((-0.1, None))\n with pytest.raises(ValueError, match='Baseline must be same'):\n concatenate_epochs([epochs, epochs2])\n\n # check if dev_head_t is same\n epochs2 = epochs.copy()\n concatenate_epochs([epochs, epochs2]) # should work\n epochs2.info['dev_head_t']['trans'][:3, 3] += 0.0001\n with pytest.raises(ValueError, match=r\"info\\['dev_head_t'\\] differs\"):\n concatenate_epochs([epochs, epochs2])\n with pytest.raises(TypeError, match='must be a list or tuple'):\n concatenate_epochs('foo')\n with pytest.raises(TypeError, match='must be an instance of Epochs'):\n concatenate_epochs([epochs, 'foo'])\n epochs2.info['dev_head_t'] = None\n with pytest.raises(ValueError, match=r\"info\\['dev_head_t'\\] differs\"):\n concatenate_epochs([epochs, epochs2])\n epochs.info['dev_head_t'] = None\n concatenate_epochs([epochs, epochs2]) # should work\n\n # check that different event_id does not work:\n epochs1 = epochs.copy()\n epochs2 = epochs.copy()\n epochs1.event_id = dict(a=1)\n epochs2.event_id = dict(a=2)\n with pytest.raises(ValueError, match='identical keys'):\n concatenate_epochs([epochs1, epochs2])\n\n # check concatenating epochs where one of the objects is empty\n epochs2 = epochs.copy()[:0]\n with pytest.warns(RuntimeWarning, match='was empty'):\n concatenate_epochs([epochs, epochs2])\n\n # check concatenating epochs results are chronologically ordered\n epochs2 = epochs.copy().load_data()\n # Ensure first event is at 0\n epochs2.events[:, 0] -= np.min(epochs2.events[:, 0])\n with pytest.warns(RuntimeWarning, match='not chronologically ordered'):\n concatenate_epochs([epochs, epochs2], add_offset=False)\n concatenate_epochs([epochs, epochs2], add_offset=True)\n\n\[email protected]\ndef test_concatenate_epochs_large():\n \"\"\"Test concatenating epochs on large data.\"\"\"\n raw, events, picks = _get_data()\n epochs = Epochs(raw=raw, events=events, event_id=event_id, tmin=tmin,\n tmax=tmax, picks=picks, preload=True)\n\n # check events are shifted, but relative position are equal\n epochs_list = [epochs.copy() for ii in range(3)]\n epochs_cat = concatenate_epochs(epochs_list)\n for ii in range(3):\n evs = epochs_cat.events[ii * len(epochs):(ii + 1) * len(epochs)]\n rel_pos = epochs_list[ii].events[:, 0] - evs[:, 0]\n assert (sum(rel_pos - rel_pos[0]) == 0)\n\n # test large number of epochs\n long_epochs_list = [epochs.copy() for ii in range(60)]\n many_epochs_cat = concatenate_epochs(long_epochs_list)\n max_expected_sample_index = 60 * 1.2 * np.max(epochs.events[:, 0])\n assert np.max(many_epochs_cat.events[:, 0]) < max_expected_sample_index\n\n\ndef test_add_channels():\n \"\"\"Test epoch splitting / re-appending channel types.\"\"\"\n raw, events, picks = _get_data()\n epoch_nopre = Epochs(\n raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,\n picks=picks)\n epoch = Epochs(\n raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,\n picks=picks, preload=True)\n epoch_eeg = epoch.copy().pick_types(meg=False, eeg=True)\n epoch_meg = epoch.copy().pick_types(meg=True)\n epoch_stim = epoch.copy().pick_types(meg=False, stim=True)\n epoch_eeg_meg = epoch.copy().pick_types(meg=True, eeg=True)\n epoch_new = epoch_meg.copy().add_channels([epoch_eeg, epoch_stim])\n assert all(ch in epoch_new.ch_names\n for ch in epoch_stim.ch_names + epoch_meg.ch_names)\n epoch_new 
= epoch_meg.copy().add_channels([epoch_eeg])\n\n assert (ch in epoch_new.ch_names for ch in epoch.ch_names)\n assert_array_equal(epoch_new._data, epoch_eeg_meg._data)\n assert all(ch not in epoch_new.ch_names\n for ch in epoch_stim.ch_names)\n\n # Now test errors\n epoch_badsf = epoch_eeg.copy()\n with epoch_badsf.info._unlock():\n epoch_badsf.info['sfreq'] = 3.1415927\n epoch_eeg = epoch_eeg.crop(-.1, .1)\n\n epoch_meg.load_data()\n pytest.raises(RuntimeError, epoch_meg.add_channels, [epoch_nopre])\n pytest.raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])\n pytest.raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])\n pytest.raises(ValueError, epoch_meg.add_channels, [epoch_meg])\n pytest.raises(TypeError, epoch_meg.add_channels, epoch_badsf)\n\n\ndef test_seeg_ecog():\n \"\"\"Test compatibility of the Epoch object with SEEG, DBS and ECoG data.\"\"\"\n n_epochs, n_channels, n_times, sfreq = 5, 10, 20, 1000.\n data = np.ones((n_epochs, n_channels, n_times))\n events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T\n pick_dict = dict(meg=False, exclude=[])\n for key in ('seeg', 'dbs', 'ecog'):\n info = create_info(n_channels, sfreq, key)\n epochs = EpochsArray(data, info, events)\n pick_dict.update({key: True})\n picks = pick_types(epochs.info, **pick_dict)\n del pick_dict[key]\n assert_equal(len(picks), n_channels)\n\n\ndef test_default_values():\n \"\"\"Test default event_id, tmax tmin values are working correctly.\"\"\"\n raw, events = _get_data()[:2]\n epoch_1 = Epochs(raw, events[:1], preload=True)\n epoch_2 = Epochs(raw, events[:1], tmin=-0.2, tmax=0.5, preload=True)\n assert_equal(hash(epoch_1), hash(epoch_2))\n\n\n@requires_pandas\ndef test_metadata(tmp_path):\n \"\"\"Test metadata support with pandas.\"\"\"\n from pandas import DataFrame\n\n data = np.random.randn(10, 2, 2000)\n chs = ['a', 'b']\n info = create_info(chs, 1000)\n meta = np.array([[1.] * 5 + [3.] 
* 5,\n ['a'] * 2 + ['b'] * 3 + ['c'] * 3 + ['µ'] * 2],\n dtype='object').T\n meta = DataFrame(meta, columns=['num', 'letter'])\n meta['num'] = np.array(meta['num'], float)\n events = np.arange(meta.shape[0])\n events = np.column_stack([events, np.zeros([len(events), 2])]).astype(int)\n events[5:, -1] = 1\n event_id = {'zero': 0, 'one': 1}\n with catch_logging() as log:\n epochs = EpochsArray(data, info, metadata=meta,\n events=events, event_id=event_id, verbose=True)\n log = log.getvalue()\n msg = 'Adding metadata with 2 columns'\n assert log.count(msg) == 1, f'\\nto find:\\n{msg}\\n\\nlog:\\n{log}'\n with use_log_level(True):\n with catch_logging() as log:\n epochs.metadata = meta\n log = log.getvalue().strip()\n assert log == 'Replacing existing metadata with 2 columns', f'{log}'\n indices = np.arange(len(epochs)) # expected indices\n assert_array_equal(epochs.metadata.index, indices)\n\n assert len(epochs[[1, 2]].events) == len(epochs[[1, 2]].metadata)\n assert_array_equal(epochs[[1, 2]].metadata.index, indices[[1, 2]])\n assert len(epochs['one']) == 5\n\n # Construction\n with pytest.raises(ValueError):\n # Events and metadata must have same len\n epochs_arr = EpochsArray(epochs._data, epochs.info, epochs.events[:-1],\n tmin=0, event_id=epochs.event_id,\n metadata=epochs.metadata)\n\n with pytest.raises(ValueError):\n # Events and data must have same len\n epochs = EpochsArray(data, info, metadata=meta.iloc[:-1])\n\n for data in [meta.values, meta['num']]:\n # Metadata must be a DataFrame\n with pytest.raises(ValueError):\n epochs = EpochsArray(data, info, metadata=data)\n\n # Need strings, ints, and floats\n with pytest.raises(ValueError):\n tmp_meta = meta.copy()\n tmp_meta['foo'] = np.array # This should be of type object\n epochs = EpochsArray(data, info, metadata=tmp_meta)\n\n # Getitem\n assert len(epochs['num < 2']) == 5\n assert len(epochs['num < 5']) == 10\n assert len(epochs['letter == \"b\"']) == 3\n assert len(epochs['num < 5']) == len(epochs['num < 5'].metadata)\n\n with pytest.raises(KeyError):\n epochs['blah == \"yo\"']\n\n assert_array_equal(epochs.selection, indices)\n epochs.drop(0)\n assert_array_equal(epochs.selection, indices[1:])\n assert_array_equal(epochs.metadata.index, indices[1:])\n epochs.drop([0, -1])\n assert_array_equal(epochs.selection, indices[2:-1])\n assert_array_equal(epochs.metadata.index, indices[2:-1])\n assert_array_equal(len(epochs), 7) # originally 10\n\n # I/O\n # Make sure values don't change with I/O\n tempdir = str(tmp_path)\n temp_fname = op.join(tempdir, 'tmp-epo.fif')\n temp_one_fname = op.join(tempdir, 'tmp-one-epo.fif')\n with catch_logging() as log:\n epochs.save(temp_fname, verbose=True, overwrite=True)\n assert log.getvalue() == '' # assert no junk from metadata setting\n epochs_read = read_epochs(temp_fname, preload=True)\n assert_metadata_equal(epochs.metadata, epochs_read.metadata)\n epochs_arr = EpochsArray(epochs._data, epochs.info, epochs.events,\n tmin=0, event_id=epochs.event_id,\n metadata=epochs.metadata,\n selection=epochs.selection)\n assert_metadata_equal(epochs.metadata, epochs_arr.metadata)\n\n with pytest.raises(TypeError): # Needs to be a dataframe\n epochs.metadata = np.array([0])\n\n ###########################################################################\n # Now let's fake having no Pandas and make sure everything works\n\n epochs_one = epochs['one']\n epochs_one.save(temp_one_fname, overwrite=True)\n epochs_one_read = read_epochs(temp_one_fname)\n assert_metadata_equal(epochs_one.metadata, 
epochs_one_read.metadata)\n\n with _FakeNoPandas():\n epochs_read = read_epochs(temp_fname)\n assert isinstance(epochs_read.metadata, list)\n assert isinstance(epochs_read.metadata[0], dict)\n assert epochs_read.metadata[5]['num'] == 3.\n\n epochs_one_read = read_epochs(temp_one_fname)\n assert isinstance(epochs_one_read.metadata, list)\n assert isinstance(epochs_one_read.metadata[0], dict)\n assert epochs_one_read.metadata[0]['num'] == 3.\n\n epochs_one_nopandas = epochs_read['one']\n assert epochs_read.metadata[5]['num'] == 3.\n assert epochs_one_nopandas.metadata[0]['num'] == 3.\n # sel (no Pandas) == sel (w/ Pandas) -> save -> load (no Pandas)\n assert_metadata_equal(epochs_one_nopandas.metadata,\n epochs_one_read.metadata)\n epochs_one_nopandas.save(temp_one_fname, overwrite=True)\n # can't make this query\n with pytest.raises(KeyError) as excinfo:\n epochs_read['num < 2']\n excinfo.match('.*Pandas query could not be performed.*')\n # still can't, but with no metadata the message should be different\n epochs_read.metadata = None\n with pytest.raises(KeyError) as excinfo:\n epochs_read['num < 2']\n excinfo.match(r'^((?!Pandas).)*$')\n del epochs_read\n # sel (no Pandas) == sel (no Pandas) -> save -> load (no Pandas)\n epochs_one_nopandas_read = read_epochs(temp_one_fname)\n assert_metadata_equal(epochs_one_nopandas_read.metadata,\n epochs_one_nopandas.metadata)\n # sel (w/ Pandas) == sel (no Pandas) -> save -> load (w/ Pandas)\n epochs_one_nopandas_read = read_epochs(temp_one_fname)\n assert_metadata_equal(epochs_one_nopandas_read.metadata,\n epochs_one.metadata)\n\n # gh-4820\n raw_data = np.random.randn(10, 1000)\n info = mne.create_info(10, 1000.)\n raw = mne.io.RawArray(raw_data, info)\n events = [[0, 0, 1], [100, 0, 1], [200, 0, 1], [300, 0, 1]]\n metadata = DataFrame([dict(idx=idx) for idx in range(len(events))])\n epochs = mne.Epochs(raw, events=events, tmin=-.050, tmax=.100,\n metadata=metadata)\n epochs.drop_bad()\n assert len(epochs) == len(epochs.metadata)\n\n # gh-4821\n epochs.metadata['new_key'] = 1\n assert_array_equal(epochs['new_key == 1'].get_data(),\n epochs.get_data())\n # ensure bad user changes break things\n epochs.metadata.drop(epochs.metadata.index[2], inplace=True)\n assert len(epochs.metadata) == len(epochs) - 1\n with pytest.raises(ValueError,\n match='metadata must have the same number of rows .*'):\n epochs['new_key == 1']\n\n # metadata should be same length as original events\n raw_data = np.random.randn(2, 10000)\n info = mne.create_info(2, 1000.)\n raw = mne.io.RawArray(raw_data, info)\n opts = dict(raw=raw, tmin=0, tmax=.001, baseline=None)\n events = [[0, 0, 1], [1, 0, 2]]\n metadata = DataFrame(events, columns=['onset', 'duration', 'value'])\n epochs = Epochs(events=events, event_id=1, metadata=metadata, **opts)\n epochs.drop_bad()\n assert len(epochs) == 1\n assert len(epochs.metadata) == 1\n with pytest.raises(ValueError, match='same number of rows'):\n Epochs(events=events, event_id=1, metadata=metadata.iloc[:1], **opts)\n\n # gh-7732: problem when repeated events and metadata\n for er in ('drop', 'merge'):\n events = [[1, 0, 1], [1, 0, 1]]\n epochs = Epochs(events=events, event_repeated=er, **opts)\n epochs.drop_bad()\n assert len(epochs) == 1\n events = [[1, 0, 1], [1, 0, 1]]\n epochs = Epochs(\n events=events, event_repeated=er, metadata=metadata, **opts)\n epochs.drop_bad()\n assert len(epochs) == 1\n assert len(epochs.metadata) == 1\n\n\ndef assert_metadata_equal(got, exp):\n \"\"\"Assert metadata are equal.\"\"\"\n if exp is None:\n assert 
got is None\n elif isinstance(exp, list):\n assert isinstance(got, list)\n assert len(got) == len(exp)\n for ii, (g, e) in enumerate(zip(got, exp)):\n assert list(g.keys()) == list(e.keys())\n for key in g.keys():\n assert g[key] == e[key], (ii, key)\n else: # DataFrame\n import pandas\n assert isinstance(exp, pandas.DataFrame)\n assert isinstance(got, pandas.DataFrame)\n assert set(got.columns) == set(exp.columns)\n check = (got == exp)\n assert check.all().all()\n\n\[email protected](\n ('all_event_id', 'row_events', 'keep_first', 'keep_last'),\n [({'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4, 'c': 32}, # all events\n None, None, None),\n ({'a/1': 1, 'a/2': 2}, # subset of events\n None, None, None),\n (dict(), None, None, None), # empty set of events\n ({'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4, 'c': 32},\n ('a/1', 'a/2', 'b/1', 'b/2'), ('a', 'b'), 'c')]\n)\n@requires_pandas\ndef test_make_metadata(all_event_id, row_events, keep_first,\n keep_last):\n \"\"\"Test that make_metadata works.\"\"\"\n raw, all_events, _ = _get_data()\n tmin, tmax = -0.5, 1.5\n sfreq = raw.info['sfreq']\n kwargs = dict(events=all_events, event_id=all_event_id,\n row_events=row_events,\n keep_first=keep_first, keep_last=keep_last,\n tmin=tmin, tmax=tmax,\n sfreq=sfreq)\n\n if not kwargs['event_id']:\n with pytest.raises(ValueError, match='must contain at least one'):\n make_metadata(**kwargs)\n return\n\n metadata, events, event_id = make_metadata(**kwargs)\n\n assert len(metadata) == len(events)\n\n if row_events:\n assert set(metadata['event_name']) == set(row_events)\n else:\n assert set(metadata['event_name']) == set(event_id.keys())\n\n # Check we have columns all events\n keep_first = [] if keep_first is None else keep_first\n keep_last = [] if keep_last is None else keep_last\n event_names = sorted(set(event_id.keys()) | set(keep_first) |\n set(keep_last))\n\n for event_name in event_names:\n assert event_name in metadata.columns\n\n # Check the time-locked event's metadata\n for _, row in metadata.iterrows():\n event_name = row['event_name']\n assert np.isclose(row[event_name], 0)\n\n # Check non-time-locked events' metadata\n for row_idx, row in metadata.iterrows():\n event_names = sorted(set(event_id.keys()) | set(keep_first) |\n set(keep_last) - set(row['event_name']))\n for event_name in event_names:\n if event_name in keep_first or event_name in keep_last:\n assert isinstance(row[event_name], float)\n if not ((event_name == 'a' and row_idx == 30) or\n (event_name == 'b' and row_idx == 14) or\n (event_name == 'c' and row_idx != 16)):\n assert not np.isnan(row[event_name])\n\n if event_name in keep_first and event_name not in all_event_id:\n assert (row[f'first_{event_name}'] is None or\n isinstance(row[f'first_{event_name}'], str))\n elif event_name in keep_last and event_name not in all_event_id:\n assert (row[f'last_{event_name}'] is None or\n isinstance(row[f'last_{event_name}'], str))\n\n Epochs(raw, events=events, event_id=event_id, metadata=metadata,\n verbose='warning')\n\n\ndef test_events_list():\n \"\"\"Test that events can be a list.\"\"\"\n events = [[100, 0, 1], [200, 0, 1], [300, 0, 1]]\n epochs = mne.Epochs(mne.io.RawArray(np.random.randn(10, 1000),\n mne.create_info(10, 1000.)),\n events=events)\n assert_array_equal(epochs.events, np.array(events))\n assert (repr(epochs)) # test repr\n assert (epochs._repr_html_()) # test _repr_html_\n\n\ndef test_save_overwrite(tmp_path):\n \"\"\"Test saving with overwrite functionality.\"\"\"\n tempdir = str(tmp_path)\n raw = 
mne.io.RawArray(np.random.RandomState(0).randn(100, 10000),\n mne.create_info(100, 1000.))\n\n events = mne.make_fixed_length_events(raw, 1)\n epochs = mne.Epochs(raw, events)\n\n # scenario 1: overwrite=False and there isn't a file to overwrite\n # make a filename that has not already been saved to\n fname1 = op.join(tempdir, 'test_v1-epo.fif')\n # run function to be sure it doesn't throw an error\n epochs.save(fname1, overwrite=False)\n # check that the file got written\n assert op.isfile(fname1)\n\n # scenario 2: overwrite=False and there is a file to overwrite\n # fname1 exists because of scenario 1 above\n with pytest.raises(IOError, match='Destination file exists.'):\n epochs.save(fname1, overwrite=False)\n\n # scenario 3: overwrite=True and there isn't a file to overwrite\n # make up a filename that has not already been saved to\n fname2 = op.join(tempdir, 'test_v2-epo.fif')\n # run function to be sure it doesn't throw an error\n epochs.save(fname2, overwrite=True)\n # check that the file got written\n assert op.isfile(fname2)\n with pytest.raises(IOError, match='exists'):\n epochs.save(fname2)\n\n # scenario 4: overwrite=True and there is a file to overwrite\n # run function to be sure it doesn't throw an error\n # fname2 exists because of scenario 1 above\n epochs.save(fname2, overwrite=True)\n\n\[email protected]('preload', (True, False))\[email protected]('is_complex', (True, False))\[email protected]('fmt, rtol', [('single', 2e-6), ('double', 1e-10)])\ndef test_save_complex_data(tmp_path, preload, is_complex, fmt, rtol):\n \"\"\"Test whether epochs of hilbert-transformed data can be saved.\"\"\"\n raw, events = _get_data()[:2]\n raw.load_data()\n if is_complex:\n raw.apply_hilbert(envelope=False, n_fft=None)\n epochs = Epochs(raw, events[:1], preload=True)[0]\n temp_fname = op.join(str(tmp_path), 'test-epo.fif')\n epochs.save(temp_fname, fmt=fmt)\n data = epochs.get_data().copy()\n epochs_read = read_epochs(temp_fname, proj=False, preload=preload)\n data_read = epochs_read.get_data()\n want_dtype = np.complex128 if is_complex else np.float64\n assert data.dtype == want_dtype\n assert data_read.dtype == want_dtype\n # XXX for some reason some random samples in here are off by a larger\n # factor...\n if fmt == 'single' and not preload and not is_complex:\n rtol = 2e-4\n assert_allclose(data_read, data, rtol=rtol)\n\n\ndef test_no_epochs(tmp_path):\n \"\"\"Test that having the first epoch bad does not break writing.\"\"\"\n # a regression noticed in #5564\n raw, events = _get_data()[:2]\n reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)\n raw.info['bads'] = ['MEG 2443', 'EEG 053']\n epochs = mne.Epochs(raw, events, reject=reject)\n epochs.save(op.join(str(tmp_path), 'sample-epo.fif'), overwrite=True)\n assert 0 not in epochs.selection\n assert len(epochs) > 0\n # and with no epochs remaining\n raw.info['bads'] = []\n epochs = mne.Epochs(raw, events, reject=reject)\n with pytest.warns(RuntimeWarning, match='no data'):\n epochs.save(op.join(str(tmp_path), 'sample-epo.fif'), overwrite=True)\n assert len(epochs) == 0 # all dropped\n\n\ndef test_readonly_times():\n \"\"\"Test that the times property is read only.\"\"\"\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events[:1], preload=True)\n with pytest.raises(ValueError, match='read-only'):\n epochs._times_readonly += 1\n with pytest.raises(ValueError, match='read-only'):\n epochs.times += 1\n with pytest.raises(ValueError, match='read-only'):\n epochs.times[:] = 0.\n\n\ndef test_channel_types_mixin():\n \"\"\"Test 
channel types mixin.\"\"\"\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events[:1], preload=True)\n ch_types = epochs.get_channel_types()\n assert len(ch_types) == len(epochs.ch_names)\n assert all(np.in1d(ch_types, ['mag', 'grad', 'eeg', 'eog', 'stim']))\n\n\ndef test_average_methods():\n \"\"\"Test average methods.\"\"\"\n n_epochs, n_channels, n_times = 5, 10, 20\n sfreq = 1000.\n data = rng.randn(n_epochs, n_channels, n_times)\n\n events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T\n # Add second event type\n events[-2:, 2] = 2\n event_id = dict(first=1, second=2)\n\n info = create_info(n_channels, sfreq, 'eeg')\n epochs = EpochsArray(data, info, events, event_id=event_id)\n\n for method in ('mean', 'median'):\n if method == \"mean\":\n def fun(data):\n return np.mean(data, axis=0)\n elif method == \"median\":\n def fun(data):\n return np.median(data, axis=0)\n\n evoked_data = epochs.average(method=method).data\n assert_array_equal(evoked_data, fun(data))\n\n # Test averaging by event type\n ev = epochs.average(by_event_type=True)\n assert len(ev) == 2\n assert ev[0].comment == 'first'\n assert_array_equal(ev[0].data, np.mean(data[:-2], axis=0))\n assert ev[1].comment == 'second'\n assert_array_equal(ev[1].data, np.mean(data[-2:], axis=0))\n\n\[email protected]('relative', (True, False))\ndef test_shift_time(relative):\n \"\"\"Test the timeshift method.\"\"\"\n timeshift = 13.5e-3 # Using sub-ms timeshift to test for sample accuracy.\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events[:1], preload=True, baseline=None)\n avg = epochs.average().shift_time(timeshift, relative=relative)\n avg2 = epochs.shift_time(timeshift, relative=relative).average()\n assert_array_equal(avg.times, avg2.times)\n assert_equal(avg.first, avg2.first)\n assert_equal(avg.last, avg2.last)\n assert_array_equal(avg.data, avg2.data)\n\n\[email protected]('preload', (True, False))\ndef test_shift_time_raises_when_not_loaded(preload):\n \"\"\"Test whether shift_time throws an exception when data is not loaded.\"\"\"\n timeshift = 13.5e-3 # Using sub-ms timeshift to test for sample accuracy.\n raw, events = _get_data()[:2]\n epochs = Epochs(raw, events[:1], preload=preload, baseline=None)\n if not preload:\n pytest.raises(RuntimeError, epochs.shift_time, timeshift)\n else:\n epochs.shift_time(timeshift)\n\n\[email protected]_testing_data\[email protected]('preload', (True, False))\[email protected]('fname', (fname_raw_testing, raw_fname))\ndef test_epochs_drop_selection(fname, preload):\n \"\"\"Test epochs drop and selection.\"\"\"\n raw = read_raw_fif(fname, preload=True)\n raw.info['bads'] = ['MEG 2443']\n events = mne.make_fixed_length_events(raw, id=1, start=0.5, duration=1.0)\n assert len(events) > 10\n kwargs = dict(tmin=-0.2, tmax=0.5, proj=False, baseline=(None, 0))\n reject = dict(mag=4e-12, grad=4000e-13)\n\n # Hack the first channel data to store the desired selection in epoch data\n raw._data[0] = 0.\n scale = 1e-13\n vals = scale * np.arange(1, len(events) + 1)\n raw._data[0, events[:, 0] - raw.first_samp + 1] = vals\n\n def _get_selection(epochs):\n \"\"\"Get the desired selection from our modified epochs.\"\"\"\n selection = np.round(epochs.get_data()[:, 0].max(axis=-1) / scale)\n return selection.astype(int) - 1\n\n # No rejection\n epochs = mne.Epochs(raw, events, preload=preload, **kwargs)\n if not preload:\n epochs.drop_bad()\n assert len(epochs) == len(events) # none dropped\n selection = _get_selection(epochs)\n 
assert_array_equal(np.arange(len(events)), selection) # kept all\n assert_array_equal(epochs.selection, selection)\n\n # Dropping during construction\n epochs = mne.Epochs(raw, events, preload=preload, reject=reject, **kwargs)\n if not preload:\n epochs.drop_bad()\n assert 4 < len(epochs) < len(events) # some dropped\n selection = _get_selection(epochs)\n assert_array_equal(selection, epochs.selection)\n good_selection = selection\n\n # Dropping after construction\n epochs = mne.Epochs(raw, events, preload=preload, **kwargs)\n if not preload:\n epochs.drop_bad()\n assert len(epochs) == len(events)\n epochs.drop_bad(reject=reject, verbose=True)\n assert_array_equal(epochs.selection, good_selection) # same as before\n selection = _get_selection(epochs)\n assert_array_equal(selection, epochs.selection)\n\n # Dropping after construction manually\n epochs = mne.Epochs(raw, events, preload=preload, **kwargs)\n if not preload:\n epochs.drop_bad()\n assert_array_equal(epochs.selection, np.arange(len(events))) # no drops\n drop_idx = [1, 3]\n want_selection = np.setdiff1d(np.arange(len(events)), drop_idx)\n epochs.drop(drop_idx)\n assert_array_equal(epochs.selection, want_selection)\n selection = np.round(epochs.get_data()[:, 0].max(axis=-1) / scale)\n selection = selection.astype(int) - 1\n assert_array_equal(selection, epochs.selection)\n\n\[email protected]('kind', ('file', 'bytes'))\[email protected]('preload', (True, False))\ndef test_file_like(kind, preload, tmp_path):\n \"\"\"Test handling with file-like objects.\"\"\"\n tempdir = str(tmp_path)\n raw = mne.io.RawArray(np.random.RandomState(0).randn(100, 10000),\n mne.create_info(100, 1000.))\n events = mne.make_fixed_length_events(raw, 1)\n epochs = mne.Epochs(raw, events, preload=preload)\n fname = op.join(tempdir, 'test-epo.fif')\n epochs.save(fname, overwrite=True)\n\n with open(fname, 'rb') as file_fid:\n fid = BytesIO(file_fid.read()) if kind == 'bytes' else file_fid\n assert not fid.closed\n assert not file_fid.closed\n with pytest.raises(ValueError, match='preload must be used with file'):\n read_epochs(fid, preload=False)\n assert not fid.closed\n assert not file_fid.closed\n assert file_fid.closed\n\n\[email protected]('preload', (True, False))\ndef test_epochs_get_data_item(preload):\n \"\"\"Test epochs.get_data(item=...).\"\"\"\n raw, events, _ = _get_data()\n epochs = Epochs(raw, events[:10], event_id, tmin, tmax, preload=preload)\n if not preload:\n with pytest.raises(ValueError, match='item must be None'):\n epochs.get_data(item=0)\n epochs.drop_bad()\n one_data = epochs.get_data(item=0)\n one_epo = epochs[0]\n assert_array_equal(one_data, one_epo.get_data())\n\n\ndef test_pick_types_reject_flat_keys():\n \"\"\"Test that epochs.pick_types removes keys from reject/flat.\"\"\"\n raw, events, _ = _get_data()\n event_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}\n picks = pick_types(raw.info, meg=True, eeg=True, ecg=True, eog=True)\n epochs = Epochs(raw, events, event_id, preload=True, picks=picks,\n reject=dict(grad=1e-10, mag=1e-10, eeg=1e-3, eog=1e-3),\n flat=dict(grad=1e-16, mag=1e-16, eeg=1e-16, eog=1e-16))\n\n assert sorted(epochs.reject.keys()) == ['eeg', 'eog', 'grad', 'mag']\n assert sorted(epochs.flat.keys()) == ['eeg', 'eog', 'grad', 'mag']\n epochs.pick_types(meg=True, eeg=False, ecg=False, eog=False)\n assert sorted(epochs.reject.keys()) == ['grad', 'mag']\n assert sorted(epochs.flat.keys()) == ['grad', 'mag']\n\n\[email protected]_testing_data\ndef test_make_fixed_length_epochs():\n \"\"\"Test dividing raw data into 
equal-sized consecutive epochs.\"\"\"\n raw = read_raw_fif(raw_fname, preload=True)\n epochs = make_fixed_length_epochs(raw, duration=1, preload=True)\n # Test Raw with annotations\n annot = Annotations(onset=[0], duration=[5], description=['BAD'])\n raw_annot = raw.set_annotations(annot)\n epochs_annot = make_fixed_length_epochs(raw_annot, duration=1.0,\n preload=True)\n assert len(epochs) > 10\n assert len(epochs_annot) > 10\n assert len(epochs) > len(epochs_annot)\n\n # overlaps\n epochs = make_fixed_length_epochs(raw, duration=1)\n assert len(epochs.events) > 10\n epochs_ol = make_fixed_length_epochs(raw, duration=1, overlap=0.5)\n assert len(epochs_ol.events) > 20\n epochs_ol_2 = make_fixed_length_epochs(raw, duration=1, overlap=0.9)\n assert len(epochs_ol_2.events) > 100\n assert_array_equal(epochs_ol_2.events[:, 0],\n np.unique(epochs_ol_2.events[:, 0]))\n with pytest.raises(ValueError, match='overlap must be'):\n make_fixed_length_epochs(raw, duration=1, overlap=1.1)\n\n # id\n epochs = make_fixed_length_epochs(raw, duration=1, preload=True, id=2)\n assert '2' in epochs.event_id and len(epochs.event_id) == 1\n\n\ndef test_epochs_huge_events(tmp_path):\n \"\"\"Test epochs with event numbers that are too large.\"\"\"\n data = np.zeros((1, 1, 1000))\n info = create_info(1, 1000., 'eeg')\n events = np.array([0, 0, 2147483648], np.int64)\n with pytest.raises(ValueError, match=r'shape \\(N, 3\\)'):\n EpochsArray(data, info, events)\n events = events[np.newaxis]\n with pytest.raises(ValueError, match='must not exceed'):\n EpochsArray(data, info, events)\n epochs = EpochsArray(data, info)\n epochs.events = events\n with pytest.raises(TypeError, match='exceeds maximum'):\n epochs.save(tmp_path / 'temp-epo.fif')\n\n\ndef _old_bad_write(fid, kind, arr):\n if kind == FIFF.FIFF_MNE_EVENT_LIST:\n arr = arr.copy()\n arr[0, -1] = -1000 # it's transposed\n return write_int(fid, kind, arr)\n\n\ndef test_concat_overflow(tmp_path, monkeypatch):\n \"\"\"Test overflow events during concat.\"\"\"\n data = np.zeros((2, 10, 1000))\n events = np.array([[0, 0, 1], [INT32_MAX, 0, 2]])\n info = mne.create_info(10, 1000., 'eeg')\n epochs_1 = mne.EpochsArray(data, info, events)\n epochs_2 = mne.EpochsArray(data, info, events)\n with pytest.warns(RuntimeWarning, match='consecutive increasing'):\n epochs = mne.concatenate_epochs((epochs_1, epochs_2))\n assert_array_less(0, epochs.events[:, 0])\n fname = tmp_path / 'temp-epo.fif'\n epochs.save(fname)\n epochs = read_epochs(fname)\n assert_array_less(0, epochs.events[:, 0])\n assert_array_less(epochs.events[:, 0], INT32_MAX + 1)\n # with our old behavior\n monkeypatch.setattr(mne.epochs, 'write_int', _old_bad_write)\n epochs.save(fname, overwrite=True)\n with pytest.warns(RuntimeWarning, match='Incorrect events'):\n epochs = read_epochs(fname)\n assert_array_less(0, epochs.events[:, 0])\n assert_array_less(epochs.events[:, 0], INT32_MAX + 1)\n\n\ndef test_epochs_baseline_after_cropping(tmp_path):\n \"\"\"Epochs.baseline should be retained if baseline period was cropped.\"\"\"\n sfreq = 1000\n tstep = 1. 
/ sfreq\n times = np.arange(0, 2 + tstep, tstep)\n\n # Linear ramp: 0–100 µV\n data = (scipy.signal.sawtooth(2 * np.pi * 0.25 * times, 0.5)\n .reshape(1, -1)) * 50e-6 + 50e-6\n\n ch_names = ['EEG 001']\n ch_types = ['eeg']\n info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)\n raw = mne.io.RawArray(data, info)\n\n event_id = dict(event=1)\n events = np.array([[1000, 0, event_id['event']]])\n epochs_orig = mne.Epochs(raw=raw, events=events, event_id=event_id,\n tmin=-0.2, tmax=0.2, baseline=(-0.1, 0.1))\n\n # Assert baseline correction is working as intended.\n samp_min = 1000 - 200\n samp_max = 1000 + 200\n expected_data = data.copy()[0, samp_min:samp_max + 1]\n baseline = expected_data[100:301]\n expected_data -= baseline.mean()\n expected_data = expected_data.reshape(1, 1, -1)\n assert_equal(epochs_orig.get_data(), expected_data)\n del expected_data, baseline, samp_min, samp_max\n\n # Even after cropping the baseline period, Epochs.baseline should remain\n # unchanged\n epochs_cropped = epochs_orig.copy().load_data().crop(tmin=0, tmax=None)\n\n assert_equal(epochs_orig.baseline, epochs_cropped.baseline)\n assert 'baseline period was cropped' in str(epochs_cropped)\n assert_equal(epochs_cropped.get_data().squeeze(),\n epochs_orig.get_data().squeeze()[200:])\n\n # Test I/O roundtrip.\n epochs_fname = tmp_path / 'temp-cropped-epo.fif'\n epochs_cropped.save(epochs_fname)\n epochs_cropped_read = mne.read_epochs(epochs_fname)\n\n assert_allclose(epochs_orig.baseline, epochs_cropped_read.baseline)\n assert 'baseline period was cropped' in str(epochs_cropped_read)\n assert_allclose(epochs_cropped.get_data(), epochs_cropped_read.get_data())\n\n\ndef test_empty_constructor():\n \"\"\"Test empty constructor for RtEpochs.\"\"\"\n info = create_info(1, 1000., 'eeg')\n event_id = 1\n tmin, tmax, baseline = -0.2, 0.5, None\n BaseEpochs(info, None, None, event_id, tmin, tmax, baseline)\n\n\ndef test_apply_function():\n \"\"\"Test apply function to epoch objects.\"\"\"\n n_channels = 10\n data = np.arange(2 * n_channels * 1000).reshape(2, n_channels, 1000)\n events = np.array([[0, 0, 1], [INT32_MAX, 0, 2]])\n info = mne.create_info(n_channels, 1000., 'eeg')\n epochs = mne.EpochsArray(data, info, events)\n data_epochs = epochs.get_data()\n\n # apply_function to all channels at once\n def fun(data):\n \"\"\"Reverse channel order without changing values.\"\"\"\n return np.eye(data.shape[1])[::-1] @ data\n\n want = data_epochs[:, ::-1]\n got = epochs.apply_function(fun, channel_wise=False).get_data()\n assert_array_equal(want, got)\n\n # apply_function channel-wise (to first 3 channels) by replacing with mean\n picks = np.arange(3)\n non_picks = np.arange(3, n_channels)\n\n def fun(data):\n return np.full_like(data, data.mean())\n\n out = epochs.apply_function(fun, picks=picks, channel_wise=True)\n expected = epochs.get_data(picks).mean(axis=-1, keepdims=True)\n assert np.all(out.get_data(picks) == expected)\n assert_array_equal(out.get_data(non_picks), epochs.get_data(non_picks))\n\n\[email protected]_testing_data\ndef test_add_channels_picks():\n \"\"\"Check that add_channels properly deals with picks.\"\"\"\n raw = mne.io.read_raw_fif(raw_fname, verbose=False)\n raw.pick([2, 3, 310]) # take some MEG and EEG\n raw.info.normalize_proj()\n\n events = mne.make_fixed_length_events(raw, id=3000, start=0)\n epochs = mne.Epochs(raw, events, event_id=3000, tmin=0, tmax=1,\n proj=True, baseline=None, reject=None, preload=True,\n decim=1)\n\n epochs_final = epochs.copy()\n epochs_bis = 
epochs.copy().rename_channels(lambda ch: ch + '_bis')\n epochs_final.add_channels([epochs_bis], force_update_info=True)\n epochs_final.drop_channels(epochs.ch_names)\n" ]
[ [ "numpy.dot", "numpy.in1d", "pandas.DataFrame", "numpy.max", "numpy.random.randn", "numpy.mean", "numpy.zeros_like", "numpy.any", "numpy.where", "numpy.random.default_rng", "numpy.testing.assert_equal", "numpy.allclose", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.std", "matplotlib.pyplot.close", "numpy.zeros", "numpy.isclose", "numpy.testing.assert_array_almost_equal", "numpy.nonzero", "numpy.min", "numpy.isnan", "numpy.median", "numpy.logical_or", "numpy.int64", "numpy.delete", "numpy.log10", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.RandomState", "numpy.fft.rfft", "numpy.int32", "numpy.setdiff1d", "numpy.sort", "numpy.testing.assert_array_equal", "numpy.ones", "numpy.testing.assert_array_less" ] ]
zhangyongzhe20/nlpia
[ "2662b6e4a8d0668e39221199af73b2acdfef6fec" ]
[ "src/nlpia/book/examples/ch03-2.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\nfrom nltk.tokenize import TreebankWordTokenizer\n\nsentence = \"The faster Harry got to the store, the faster Harry, the faster, would get home.\"\ntokenizer = TreebankWordTokenizer()\ntoken_sequence = tokenizer.tokenize(sentence.lower())\nprint(token_sequence)\n\n\n# In[2]:\n\n\nfrom collections import Counter\nbag_of_words = Counter(token_sequence)\nprint(bag_of_words)\n\n\n# In[3]:\n\n\nword_list = bag_of_words.most_common() # Passing an integer as an argument will give you that many from the top of the list\nprint(word_list)\n\n\n# In[4]:\n\n\ntimes_harry_appears = bag_of_words['harry']\ntotal_words = len(word_list) # The number of tokens from our original source.\ntf = times_harry_appears/total_words\n\nprint(tf)\n\n\n# In[5]:\n\n\nkite_text = \"A kite is traditionally a tethered heavier-than-air craft with wing surfaces that react against the air to create lift and drag. A kite consists of wings, tethers, and anchors. Kites often have a bridle to guide the face of the kite at the correct angle so the wind can lift it. A kite's wing also may be so designed so a bridle is not needed; when kiting a sailplane for launch, the tether meets the wing at a single point. A kite may have fixed or moving anchors. Untraditionally in technical kiting, a kite consists of tether-set-coupled wing sets; even in technical kiting, though, a wing in the system is still often called the kite. The lift that sustains the kite in flight is generated when air flows around the kite's surface, producing low pressure above and high pressure below the wings. The interaction with the wind also generates horizontal drag along the direction of the wind. The resultant force vector from the lift and drag force components is opposed by the tension of one or more of the lines or tethers to which the kite is attached. The anchor point of the kite line may be static or moving (e.g., the towing of a kite by a running person, boat, free-falling anchors as in paragliders and fugitive parakites or vehicle). The same principles of fluid flow apply in liquids and kites are also used under water. A hybrid tethered craft comprising both a lighter-than-air balloon as well as a kite lifting surface is called a kytoon. Kites have a long and varied history and many different types are flown individually and at festivals worldwide. Kites may be flown for recreation, art or other practical uses. Sport kites can be flown in aerial ballet, sometimes as part of a competition. Power kites are multi-line steerable kites designed to generate large forces which can be used to power activities such as kite surfing, kite landboarding, kite fishing, kite buggying and a new trend snow kiting. 
Even Man-lifting kites have been made.\"\n\n\n# In[6]:\n\n\nfrom collections import Counter\nfrom nltk.tokenize import TreebankWordTokenizer\n\ntokenizer = TreebankWordTokenizer()\n\n# kite_text = \"A kite is traditionally ...\" # Step left to user, so we aren't repeating ourselves\ntokens = tokenizer.tokenize(kite_text.lower())\nprint(\"length: \", len(tokens))\ntoken_sequence = Counter(tokens)\nprint(token_sequence)\n\n\n# In[7]:\n\n\nimport nltk\n\nnltk.download('stopwords')\nstopwords = nltk.corpus.stopwords.words('english')\nprint(\"num of stopwords: \", len(stopwords))\ntokens = [x for x in tokens if x not in stopwords]\nkite_count = Counter(tokens)\nprint(kite_count)\n\n\n# In[8]:\n\n\ndocument_vector = []\ndoc_length = len(tokens)\nfor key, value in kite_count.most_common():\n document_vector.append(value / doc_length)\n\nprint(document_vector)\n\n\n# In[9]:\n\n\ndoc_0 = \"The faster Harry got to the store, the faster Harry, the faster, would get home.\"\ndoc_1 = \"Harry is hairy and faster than Jill.\"\ndoc_2 = \"Jill is not as hairy as Harry.\"\n\n\n# In[10]:\n\n\ntokens_0 = tokenizer.tokenize(doc_0.lower())\ntokens_1 = tokenizer.tokenize(doc_1.lower())\ntokens_2 = tokenizer.tokenize(doc_2.lower())\nlexicon = set(tokens_0 + tokens_1 + tokens_2)\n\nprint(lexicon)\n\n\n# In[11]:\n\n\nprint(len(lexicon))\n\n\n# In[12]:\n\n\nfrom collections import OrderedDict\n\nvector_template = OrderedDict((token, 0) for token in lexicon)\nprint(\"vector template: \", vector_template)\n\n\n# In[13]:\n\n\nimport copy\n\ndocument_vectors = []\nfor doc in [doc_0, doc_1, doc_2]:\n\n vec = copy.copy(vector_template) # So we are dealing with new objects, not multiple references to the same object\n\n tokens = tokenizer.tokenize(doc.lower())\n token_counts = Counter(tokens)\n\n for key, value in token_counts.items():\n vec[key] = value / len(lexicon)\n document_vectors.append(vec)\n\n\n# In[14]:\n\n\nimport math\n\ndef cosine_sim(vec1, vec2):\n \"\"\"\n Since our vectors are dictionaries, lets convert them to lists for easier mathing.\n \"\"\"\n vec1 = [val for val in vec1.values()]\n vec2 = [val for val in vec2.values()]\n print(\"vec1: \", vec2)\n \n dot_prod = 0\n for i, v in enumerate(vec1):\n dot_prod += v * vec2[i]\n \n mag_1 = math.sqrt(sum([x**2 for x in vec1]))\n mag_2 = math.sqrt(sum([x**2 for x in vec2]))\n \n return dot_prod / (mag_1 * mag_2)\n\n\n# In[15]:\n\n\nfrom nltk.corpus import brown\nprint(len(brown.words())) # words is a builtin method of the nltk corpus object that gives a list of tokens\n\n\n# In[16]:\n\n\nfrom collections import Counter\n\npuncs = [',', '.', '--', '-', '!', '?', ':', ';', '``', \"''\", '(', ')', '[', ']']\nword_list = [x.lower() for x in brown.words() if x not in puncs]\ntoken_counts = Counter(word_list)\nprint(token_counts.most_common(20))\n\n\n# In[17]:\n\n\nhistory_text = 'Kites were invented in China, where materials ideal for kite building were readily available: silk fabric for sail material; fine, high-tensile-strength silk for flying line; and resilient bamboo for a strong, lightweight framework. The kite has been claimed as the invention of the 5th-century BC Chinese philosophers Mozi (also Mo Di) and Lu Ban (also Gongshu Ban). By 549 AD paper kites were certainly being flown, as it was recorded that in that year a paper kite was used as a message for a rescue mission. Ancient and medieval Chinese sources describe kites being used for measuring distances, testing the wind, lifting men, signaling, and communication for military operations. 
The earliest known Chinese kites were flat (not bowed) and often rectangular. Later, tailless kites incorporated a stabilizing bowline. Kites were decorated with mythological motifs and legendary figures; some were fitted with strings and whistles to make musical sounds while flying. From China, kites were introduced to Cambodia, Thailand, India, Japan, Korea and the western world. After its introduction into India, the kite further evolved into the fighter kite, known as the patang in India, where thousands are flown every year on festivals such as Makar Sankranti. Kites were known throughout Polynesia, as far as New Zealand, with the assumption being that the knowledge diffused from China along with the people. Anthropomorphic kites made from cloth and wood were used in religious ceremonies to send prayers to the gods. Polynesian kite traditions are used by anthropologists get an idea of early \"primitive\" Asian traditions that are believed to have at one time existed in Asia.'\n\n\n# In[18]:\n\n\n# intro_text = \"A kite is traditionally ...\" # Step left to user, as above\nintro_text = kite_text.lower()\nintro_tokens = tokenizer.tokenize(intro_text)\n# history_text = \"Kites were invented in China, ...\" # Also as above\nhistory_text = history_text.lower()\nhistory_tokens = tokenizer.tokenize(history_text)\nintro_total = len(intro_tokens)\nhistory_total = len(history_tokens)\n\n\n# In[19]:\n\n\nintro_tf = {}\nhistory_tf = {}\nintro_counts = Counter(intro_tokens)\nintro_tf['kite'] = intro_counts['kite'] / intro_total\nhistory_counts = Counter(history_tokens)\nhistory_tf['kite'] = history_counts['kite'] / history_total\nprint('Term Frequency of \"kite\" in intro is: {}'.format(intro_tf['kite']))\nprint('Term Frequency of \"kite\" in history is: {}'.format(history_tf['kite']))\n\n\n# In[20]:\n\n\nintro_tf['and'] = intro_counts['and'] / intro_total\nhistory_tf['and'] = history_counts['and'] / history_total\nprint('Term Frequency of \"and\" in intro is: {}'.format(intro_tf['and']))\nprint('Term Frequency of \"and\" in history is: {}'.format(history_tf['and']))\n\n\n# In[21]:\n\n\nnum_docs_containing_and = 0\nfor doc in [intro_tokens, history_tokens]:\n if 'and' in doc:\n num_docs_containing_and += 1\n\n\n# In[22]:\n\n\nnum_docs_containing_kite = 0\nfor doc in [intro_tokens, history_tokens]:\n if 'kite' in doc:\n num_docs_containing_kite += 1\n\n\n# In[23]:\n\n\nnum_docs_containing_china = 0\nfor doc in [intro_tokens, history_tokens]:\n if 'china' in doc:\n num_docs_containing_china += 1\n\n\n# In[24]:\n\n\nintro_tf['china'] = intro_counts['china'] / intro_total\nhistory_tf['china'] = history_counts['china'] / history_total\n\n\n# In[25]:\n\n\nnum_docs = 2\nintro_idf = {}\nhistory_idf = {}\nintro_idf['and'] = num_docs / num_docs_containing_and \nhistory_idf['and'] = num_docs / num_docs_containing_and \nintro_idf['kite'] = num_docs / num_docs_containing_kite \nhistory_idf['kite'] = num_docs / num_docs_containing_kite \nintro_idf['china'] = num_docs / num_docs_containing_china \nhistory_idf['china'] = num_docs / num_docs_containing_china \n\n\n# In[26]:\n\n\nintro_tfidf = {}\n\nintro_tfidf['and'] = intro_tf['and'] * intro_idf['and']\nintro_tfidf['kite'] = intro_tf['kite'] * intro_idf['kite']\nintro_tfidf['china'] = intro_tf['china'] * intro_idf['china']\n\n\n# In[27]:\n\n\nhistory_tfidf = {}\n\nhistory_tfidf['and'] = history_tf['and'] * history_idf['and']\nhistory_tfidf['kite'] = history_tf['kite'] * history_idf['kite']\nhistory_tfidf['china'] = history_tf['china'] * 
history_idf['china']\n\n\n# In[28]:\n\n\ndocument_tfidf_vectors = []\ndocuments = [doc_0, doc_1, doc_2]\nprint(\"documents \", documents)\nfor doc in documents:\n\n vec = copy.copy(vector_template) # So we are dealing with new objects, not multiple references to the same object\n\n tokens = tokenizer.tokenize(doc.lower())\n token_counts = Counter(tokens)\n\n for key, value in token_counts.items():\n docs_containing_key = 0\n for _doc in documents:\n if key in _doc:\n docs_containing_key += 1\n tf = value / len(lexicon)\n if docs_containing_key:\n idf = len(documents) / docs_containing_key\n else:\n idf = 0\n vec[key] = tf * idf\n print(\"processed vector: \", vec)\n document_tfidf_vectors.append(vec)\n\n\n# In[29]:\n\n\nquery = \"How long does it take to get to the store?\"\nquery_vec = copy.copy(vector_template) \n\nquery_vec = copy.copy(vector_template) # So we are dealing with new objects, not multiple references to the same object\n\ntokens = tokenizer.tokenize(query.lower())\ntoken_counts = Counter(tokens)\n\nfor key, value in token_counts.items():\n docs_containing_key = 0\n for _doc in documents:\n if key in _doc.lower():\n docs_containing_key += 1\n if docs_containing_key == 0: # We didn't find that token in the lexicon go to next key\n continue\n tf = value / len(tokens)\n idf = len(documents) / docs_containing_key \n query_vec[key] = tf * idf \n\nprint(\"query_vect: \", query_vec)\nprint(cosine_sim(query_vec, document_tfidf_vectors[0]))\nprint(cosine_sim(query_vec, document_tfidf_vectors[1]))\nprint(cosine_sim(query_vec, document_tfidf_vectors[2]))\n\n\n# In[30]:\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ncorpus = [doc_0, doc_1, doc_2]\n\nvectorizer = TfidfVectorizer(min_df=1)\nmodel = vectorizer.fit_transform(corpus)\n\nprint(model.todense()) # The model becomes a sparse numpy matrix, as in a large corpus there would be mostly zeros to deal with. todense() brings it back to a regular numpy matrix for our viewing pleasure.\n\n" ]
[ [ "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
noraj/shit
[ "81186df44d22a3e690aff6025048cce2a4f7b77f" ]
[ "tests/test_hide.py" ]
[ "#!/usr/bin/env python2\nimport os\nimport sys\nimport unittest\nimport numpy as np\nfrom scipy.misc import lena\n\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(BASE_DIR, '..', 'shit'))\nimport hide\n\n\nclass HideTest(unittest.TestCase):\n ###########################################################################\n #################################################################### bit ##\n ###########################################################################\n def test_convert_msg_to_bin(self):\n MSG = \"OK\"\n BINMSG = [0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1]\n\n self.assertEqual(BINMSG, hide.convert_msg_to_bin(MSG))\n\n def test_msg_too_long_for_image(self):\n with self.assertRaises(AssertionError):\n hide.encode_msg_in_bit(img_array=lena(), msg=\"A\" * (lena().size + 1), stegobit=3)\n\n def test_img_path_is_not_a_file(self):\n with self.assertRaises(AssertionError):\n hide.encode_nth_bit(inp=\"./nonExistent\", out=\"./\", msg=\"OK\", stegobit=3)\n\n def test_wrong_stegobit_value(self):\n with self.assertRaises(AssertionError):\n hide.encode_nth_bit(inp=__file__, out=\"./\", msg=\"OK\", stegobit=9)\n with self.assertRaises(AssertionError):\n hide.encode_nth_bit(inp=__file__, out=\"./\", msg=\"OK\", stegobit=-1)\n\n def test_if_output_extension_equals_jpg(self):\n for test in ('jpg', 'jpeg', 'jpEg', 'JPG'):\n with self.assertRaises(AssertionError):\n hide.encode_nth_bit(inp=__file__, out=\"./bla.%s\" % test, msg=\"OK\", stegobit=1)\n\n ###########################################################################\n ############################################################## patchwork ##\n ###########################################################################\n def test_img_path_is_not_a_file(self):\n with self.assertRaises(AssertionError):\n hide.encode_patchwork(inp=\"./nonExistent\", out=\"./\", msg=\"OK\")\n\n def test_if_output_extension_equals_jpg(self):\n for test in ('jpg', 'jpeg', 'jpEg', 'JPG'):\n with self.assertRaises(AssertionError):\n hide.encode_patchwork(inp=__file__, out=\"./bla.%s\" % test, msg=\"OK\")\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "scipy.misc.lena" ] ]
defineHong/mmdetection_hwcar
[ "9f7fa5682867e6cef03568f2c8cd8cab0aa602ac" ]
[ "mmdet/models/detectors/yolact_det.py" ]
[ "import torch\n\nfrom mmdet.core import bbox2result\nfrom ..builder import DETECTORS, build_head\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass YOLACT_DET(SingleStageDetector):\n \"\"\"Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_\"\"\"\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n # segm_head,\n # mask_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super(YOLACT_DET, self).__init__(backbone, neck, bbox_head, train_cfg,\n test_cfg, pretrained)\n # self.segm_head = build_head(segm_head)\n # self.mask_head = build_head(mask_head)\n # self.init_segm_mask_weights()\n\n # def init_segm_mask_weights(self):\n # \"\"\"Initialize weights of the YOLACT segm head and YOLACT mask head.\"\"\"\n # self.segm_head.init_weights()\n # self.mask_head.init_weights()\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n raise NotImplementedError\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # convert Bitmap mask or Polygon Mask to Tensor here\n # gt_masks = [\n # gt_mask.to_tensor(dtype=torch.uint8, device=img.device)\n # for gt_mask in gt_masks\n # ]\n\n x = self.extract_feat(img)\n\n cls_score, bbox_pred, coeff_pred = self.bbox_head(x)\n bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,\n img_metas)\n losses, sampling_results = self.bbox_head.loss(\n *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n\n # segm_head_outs = self.segm_head(x[0])\n # loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)\n # losses.update(loss_segm)\n #\n # mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,\n # sampling_results)\n # loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,\n # img_metas, sampling_results)\n # losses.update(loss_mask)\n\n # check NaN and Inf\n for loss_name in losses.keys():\n assert torch.isfinite(torch.stack(losses[loss_name]))\\\n .all().item(), '{} becomes infinite or NaN!'\\\n .format(loss_name)\n\n return losses\n\n def simple_test(self, img, img_metas, rescale=False):\n \"\"\"Test function without test time augmentation.\"\"\"\n x = self.extract_feat(img)\n\n cls_score, bbox_pred, coeff_pred = self.bbox_head(x)\n\n bbox_inputs = (cls_score, bbox_pred,\n coeff_pred) + (img_metas, self.test_cfg, rescale)\n det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes(\n *bbox_inputs)\n bbox_results = [\n bbox2result(det_bbox, det_label, self.bbox_head.num_classes)\n for 
det_bbox, det_label in zip(det_bboxes, det_labels)\n ]\n #\n # num_imgs = len(img_metas)\n # scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n # if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n # segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n # for _ in range(num_imgs)]\n # else:\n # # if det_bboxes is rescaled to the original image size, we need to\n # # rescale it back to the testing scale to obtain RoIs.\n # if rescale and not isinstance(scale_factors[0], float):\n # scale_factors = [\n # torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n # for scale_factor in scale_factors\n # ]\n # _bboxes = [\n # det_bboxes[i][:, :4] *\n # scale_factors[i] if rescale else det_bboxes[i][:, :4]\n # for i in range(len(det_bboxes))\n # ]\n # mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas)\n # # apply mask post-processing to each image individually\n # segm_results = []\n # for i in range(num_imgs):\n # if det_bboxes[i].shape[0] == 0:\n # segm_results.append(\n # [[] for _ in range(self.mask_head.num_classes)])\n # else:\n # segm_result = self.mask_head.get_seg_masks(\n # mask_preds[i], det_labels[i], img_metas[i], rescale)\n # segm_results.append(segm_result)\n return bbox_results\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\"\"\"\n raise NotImplementedError\n" ]
[ [ "torch.stack" ] ]
PTYin/product-search
[ "41044128f9b066e81257e141b454f427bec8157b" ]
[ "src/AEM/Model.py" ]
[ "import torch\nfrom torch import nn\nfrom common.loss import nce_loss\nimport math\n\n\nclass AttentionLayer(nn.Module):\n def __init__(self, input_dim, head_num, model_name: str):\n super(AttentionLayer, self).__init__()\n self.input_dim = input_dim\n self.head_num = head_num\n self.model_name = model_name\n\n self.query_projection = nn.Linear(input_dim, input_dim * head_num)\n self.reduce_projection = nn.Linear(head_num, 1, bias=False)\n\n # self.dropout = nn.Dropout()\n\n def reset_parameters(self):\n\n # nn.init.xavier_uniform_(self.query_projection.weight)\n\n fan_in, fan_out = self.input_dim, self.input_dim\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n nn.init.uniform_(self.query_projection.weight, -a, a)\n\n fan_in, fan_out = self.input_dim, 1\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n nn.init.uniform_(self.query_projection.bias, -a, a)\n\n nn.init.uniform_(self.reduce_projection.weight, math.sqrt(3.0), math.sqrt(3.0))\n # nn.init.xavier_uniform_(self.reduce_projection.weight)\n\n def attention_function(self, query_embedding, item_embedding):\n batch_size = len(query_embedding)\n\n # ------------tanh(W*q+b)------------\n projected_query = torch.tanh(self.query_projection(query_embedding))\n # projected_query = self.dropout(projected_query)\n # shape: (batch, 1, input_dim * hidden_dim) or (batch, input_dim * hidden_dim)\n projected_query = projected_query.view((batch_size, self.input_dim, self.head_num))\n # shape: (batch, input_dim, hidden_dim)\n\n # ------------r*tanh(W*q+b)------------\n # items_query_dotted_sum = torch.einsum('bri,bih->brh', item_embedding, projected_query)\n items_query_dotted_sum = item_embedding @ projected_query\n # shape: (batch, bought_item_num, hidden_dim)\n # ------------(r*tanh(W_1*q+b))*W_2------------\n items_query_reduce_sum = self.reduce_projection(items_query_dotted_sum)\n # shape: (batch, bought_item_num, 1)\n\n # scores = items_query_reduce_sum\n # the line below is copied from the original source code yet inconsistent with the paper\n scores = (items_query_reduce_sum - torch.max(items_query_reduce_sum, dim=1, keepdim=True)[0])\n\n return scores\n\n def forward(self, item_embedding, query_embedding, mask):\n \"\"\"\n Parameters\n -----------\n item_embedding: shape(batch, bought_item_num, input_dim)\n query_embedding: shape(batch, 1, input_dim) or (batch, input_dim)\n mask: shape(batch, bought_item_num, )\n\n Return\n -----------\n torch.Tensor: shape(batch, input_dim)\n \"\"\"\n if self.model_name == 'AEM':\n attention_score = self.attention_function(query_embedding, item_embedding)\n # shape: (batch, bought_item_num, 1)\n if mask is not None:\n # attention_score = attention_score.masked_fill(mask, -float('inf'))\n attention_score = torch.exp(attention_score) * mask\n # weight = torch.softmax(attention_score, dim=1)\n denominator = torch.sum(attention_score, dim=1, keepdim=True)\n weight = attention_score / torch.where(torch.less(denominator, 1e-7), denominator + 1, denominator)\n elif self.model_name == 'ZAM':\n item_embedding = torch.cat([torch.zeros(item_embedding.shape[0], 1, self.input_dim, device='cuda:0'),\n item_embedding], dim=1)\n attention_score = self.attention_function(query_embedding, item_embedding)\n if mask is not None:\n mask = torch.cat([torch.ones(item_embedding.shape[0], 1, 1, dtype=torch.bool, device='cuda:0'),\n mask], dim=1)\n # attention_score = 
attention_score.masked_fill(mask, -float('inf'))\n attention_score = torch.exp(attention_score) * mask\n # weight = torch.softmax(attention_score, dim=1)\n denominator = torch.sum(attention_score, dim=1, keepdim=True)\n weight = attention_score / torch.where(torch.less(denominator, 1e-7), denominator + 1, denominator)\n else:\n raise NotImplementedError\n # shape: (batch, bought_item_num, 1)\n\n entity_embedding = torch.sum(weight * item_embedding, dim=1)\n # shape: (batch, input_dim)\n return entity_embedding\n\n\nclass AEM(nn.Module):\n def __init__(self, word_num, item_num, embedding_size, attention_hidden_dim, l2):\n super().__init__()\n self.embedding_size = embedding_size\n self.l2 = l2\n\n self.word_embedding_layer = nn.Embedding(word_num, embedding_size, padding_idx=0)\n # self.log_sigmoid = nn.LogSigmoid()\n self.word_bias = nn.Embedding(word_num, 1, padding_idx=0)\n\n self.item_embedding_layer = nn.Embedding(item_num, embedding_size, padding_idx=0)\n self.item_bias = nn.Embedding(item_num, 1, padding_idx=0)\n self.attention_layer = AttentionLayer(embedding_size, attention_hidden_dim, self.__class__.__name__)\n\n self.query_projection = nn.Linear(embedding_size, embedding_size)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n init_width = 0.5 / self.embedding_size\n # nn.init.normal_(self.word_embedding_layer.weight, 0, 0.1)\n nn.init.uniform_(self.word_embedding_layer.weight, -init_width, init_width)\n with torch.no_grad():\n self.word_embedding_layer.weight[self.word_embedding_layer.padding_idx].fill_(0)\n nn.init.zeros_(self.word_bias.weight)\n with torch.no_grad():\n self.word_bias.weight[self.word_embedding_layer.padding_idx].fill_(0)\n\n nn.init.zeros_(self.item_embedding_layer.weight)\n with torch.no_grad():\n self.item_embedding_layer.weight[self.item_embedding_layer.padding_idx].fill_(0)\n nn.init.zeros_(self.item_bias.weight)\n with torch.no_grad():\n self.item_bias.weight[self.item_bias.padding_idx].fill_(0)\n\n # nn.init.xavier_normal_(self.query_projection.weight)\n nn.init.xavier_uniform_(self.query_projection.weight)\n\n fan_in, fan_out = self.embedding_size, 1\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n nn.init.uniform_(self.query_projection.bias, -a, a)\n\n self.attention_layer.reset_parameters()\n\n def regularization_loss(self):\n return self.l2 * (self.word_embedding_layer.weight.norm() +\n self.item_embedding_layer.weight.norm() +\n self.query_projection.weight.norm() +\n self.attention_layer.query_projection.weight.norm() +\n self.attention_layer.reduce_projection.weight.norm())\n\n def forward(self, user_bought_items, items, query_words,\n mode: str,\n user_bought_masks=None,\n review_words=None,\n neg_items=None, neg_review_words=None):\n \"\"\"\n Parameters\n -----\n user_bought_items\n (batch, bought_items)\n items\n (batch, )\n query_words\n (batch, word_num)\n mode\n user_bought_masks\n (batch, bought_items)\n review_words\n (batch, )\n neg_items\n (batch, k)\n neg_review_words\n (batch, k)\n \"\"\"\n if mode == 'output_embedding':\n item_embeddings = self.item_embedding_layer(items)\n item_biases = self.item_bias(items).squeeze(dim=1)\n\n return item_embeddings, item_biases\n user_bought_embeddings = self.item_embedding_layer(user_bought_items)\n query_embeddings = torch.mean(self.word_embedding_layer(query_words), dim=1)\n query_embeddings = torch.tanh(self.query_projection(query_embeddings))\n user_embeddings = 
self.attention_layer(user_bought_embeddings, query_embeddings, mask=user_bought_masks)\n # user_embeddings = user_bought_embeddings.mean(dim=1)\n # user_embeddings = user_bought_embeddings.sum(dim=1) / (~user_bought_masks).sum(dim=1, keepdim=True)\n\n personalized_model = 0.5 * (query_embeddings + user_embeddings)\n # personalized_model = query_embeddings\n\n if mode == 'test':\n return personalized_model\n elif mode == 'train':\n item_embeddings = self.item_embedding_layer(items)\n neg_item_embeddings = self.item_embedding_layer(neg_items)\n word_embeddings = self.word_embedding_layer(review_words)\n # (batch, embedding_size)\n word_biases = self.word_bias(review_words).squeeze(dim=1)\n # (batch, )\n neg_word_embeddings = self.word_embedding_layer(neg_review_words)\n # (k, embedding_size)\n neg_word_biases = self.word_bias(neg_review_words).squeeze(dim=1)\n # (k, )\n\n item_word_loss = nce_loss(item_embeddings,\n word_embeddings, neg_word_embeddings,\n word_biases, neg_word_biases).mean(dim=0)\n item_biases, neg_biases = self.item_bias(items).squeeze(dim=1), self.item_bias(neg_items).squeeze(dim=1)\n search_loss = nce_loss(personalized_model,\n item_embeddings, neg_item_embeddings,\n item_biases, neg_biases).mean(dim=0)\n\n regularization_loss = self.regularization_loss()\n return item_word_loss + search_loss + regularization_loss\n # return item_word_loss\n else:\n raise NotImplementedError\n\n\nclass ZAM(AEM):\n def __init__(self, word_num, item_num, embedding_size, attention_hidden_dim, l2):\n super().__init__(word_num, item_num, embedding_size, attention_hidden_dim, l2)\n # self.attention_layer = AttentionLayer(embedding_size, attention_hidden_dim, 'ZAM')\n" ]
[ [ "torch.nn.init.uniform_", "torch.ones", "torch.max", "torch.less", "torch.zeros", "torch.sum", "torch.nn.Embedding", "torch.exp", "torch.nn.Linear", "torch.nn.init.xavier_uniform_", "torch.no_grad", "torch.nn.init.zeros_" ] ]
radioactivebean0/baboon-tracking
[ "062351c514073aac8e1207b8b46ca89ece987928" ]
[ "src/baboon_tracking/stages/motion_detector/compute_moving_foreground.py" ]
[ "\"\"\"\r\nComputes the moving foreground using the subcomponents previously computed\r\n\"\"\"\r\nimport math\r\nimport numpy as np\r\n\r\nfrom baboon_tracking.mixins.foreground_mixin import ForegroundMixin\r\nfrom baboon_tracking.mixins.frame_mixin import FrameMixin\r\nfrom baboon_tracking.mixins.history_of_dissimilarity_mixin import (\r\n HistoryOfDissimilarityMixin,\r\n)\r\nfrom baboon_tracking.mixins.moving_foreground_mixin import MovingForegroundMixin\r\nfrom baboon_tracking.mixins.weights_mixin import WeightsMixin\r\nfrom baboon_tracking.models.frame import Frame\r\nfrom pipeline import Stage\r\nfrom pipeline.stage_result import StageResult\r\nfrom pipeline.decorators import config, stage\r\n\r\n\r\n@stage(\"history_of_dissimilarity\")\r\n@stage(\"foreground\")\r\n@stage(\"weights\")\r\n@stage(\"frame_mixin\")\r\n@config(parameter_name=\"history_frames\", key=\"motion_detector/history_frames\")\r\nclass ComputeMovingForeground(Stage, MovingForegroundMixin):\r\n \"\"\"\r\n Computes the moving foreground using the subcomponents previously computed\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n history_of_dissimilarity: HistoryOfDissimilarityMixin,\r\n foreground: ForegroundMixin,\r\n weights: WeightsMixin,\r\n frame_mixin: FrameMixin,\r\n history_frames: int,\r\n ) -> None:\r\n Stage.__init__(self)\r\n MovingForegroundMixin.__init__(self)\r\n\r\n self._history_of_dissimilarity = history_of_dissimilarity\r\n self._foreground = foreground\r\n self._weights = weights\r\n self._frame = frame_mixin\r\n self._history_frames = history_frames\r\n\r\n def execute(self) -> StageResult:\r\n weights = self._weights.weights\r\n foreground = self._foreground.foreground\r\n history_of_dissimilarity = (\r\n self._history_of_dissimilarity.history_of_dissimilarity\r\n )\r\n\r\n self.moving_foreground = Frame(\r\n self._get_moving_foreground(weights, foreground, history_of_dissimilarity),\r\n self._frame.frame.get_frame_number(),\r\n )\r\n\r\n return StageResult(True, True)\r\n\r\n def _get_moving_foreground(self, weights, foreground, dissimilarity):\r\n \"\"\"\r\n Calculates moving foreground according to figure 14 of paper\r\n Each of W and D (weights and dissimilarity) is assigned to high, medium, and low\r\n\r\n Medium commonality AND low commonality but low dissimiliarity are considered moving foreground\r\n Otherwise, it is either a still or flickering background\r\n\r\n Return frame representing moving foreground\r\n \"\"\"\r\n\r\n history_frame_count_third = math.floor(float(self._history_frames - 1) / 3)\r\n third_gray = 255.0 / 3.0\r\n\r\n weights_low = (weights <= history_frame_count_third).astype(np.uint8)\r\n weights_medium = (\r\n np.logical_and(\r\n history_frame_count_third < weights, weights < self._history_frames - 1\r\n ).astype(np.uint8)\r\n * 2\r\n )\r\n\r\n weight_levels = weights_low + weights_medium\r\n\r\n foreground_low = (foreground <= math.floor(third_gray)).astype(np.uint8)\r\n foreground_medium = (\r\n (math.floor(third_gray) < foreground)\r\n + (foreground < math.floor(2 * third_gray))\r\n ).astype(np.uint8) * 2\r\n foreground_high = (foreground >= math.floor(2 * third_gray)).astype(\r\n np.uint8\r\n ) * 3\r\n\r\n foreground_levels = foreground_low + foreground_medium + foreground_high\r\n\r\n dissimilarity_low = (dissimilarity <= math.floor(third_gray)).astype(np.uint8)\r\n dissimilarity_medium = (\r\n (math.floor(third_gray) < dissimilarity)\r\n + (dissimilarity < math.floor(2 * third_gray))\r\n ).astype(np.uint8) * 2\r\n dissimilarity_high = (dissimilarity >= 
math.floor(2 * third_gray)).astype(\r\n np.uint8\r\n ) * 3\r\n\r\n dissimilarity_levels = (\r\n dissimilarity_low + dissimilarity_medium + dissimilarity_high\r\n )\r\n\r\n moving_foreground = np.logical_and(\r\n weight_levels == 2,\r\n np.greater_equal(foreground_levels, dissimilarity_levels),\r\n ).astype(np.uint8)\r\n moving_foreground = moving_foreground + np.logical_and(\r\n weight_levels == 1,\r\n np.logical_and(\r\n dissimilarity_levels == 1,\r\n np.greater(foreground_levels, dissimilarity_levels),\r\n ),\r\n ).astype(np.uint8)\r\n\r\n return moving_foreground * 255\r\n" ]
[ [ "numpy.greater_equal", "numpy.logical_and", "numpy.greater" ] ]
konstantinschulz/lm-evaluation-harness
[ "b0acb3379d2fa8e15561cea033be422bff144f30" ]
[ "lm_eval/tasks/winogrande.py" ]
[ "\"\"\"\nWinoGrande: An Adversarial Winograd Schema Challenge at Scale\nhttps://arxiv.org/pdf/1907.10641.pdf\n\nWinoGrande is a collection of 44k problems, inspired by Winograd Schema Challenge\n(Levesque, Davis, and Morgenstern 2011), but adjusted to improve the scale and\nrobustness against the dataset-specific bias. Formulated as a fill-in-a-blank\ntask with binary options, the goal is to choose the right option for a given\nsentence which requires commonsense reasoning.\n\nNOTE: This evaluation of Winogrande uses partial evaluation as described by\nTrinh & Le in Simple Method for Commonsense Reasoning (2018).\nSee: https://arxiv.org/abs/1806.02847\n\nHomepage: https://leaderboard.allenai.org/winogrande/submissions/public\n\"\"\"\nimport numpy as np\nfrom lm_eval.base import rf, Task\nfrom lm_eval.metrics import mean\n\n\n_CITATION = \"\"\"\n@article{sakaguchi2019winogrande,\n title={WinoGrande: An Adversarial Winograd Schema Challenge at Scale},\n author={Sakaguchi, Keisuke and Bras, Ronan Le and Bhagavatula, Chandra and Choi, Yejin},\n journal={arXiv preprint arXiv:1907.10641},\n year={2019}\n}\n\"\"\"\n\n\nclass Winogrande(Task):\n VERSION = 0\n DATASET_PATH = \"winogrande\"\n DATASET_NAME = \"winogrande_xl\"\n\n answer_to_num = {\"1\": 0, \"2\": 1}\n\n def has_training_docs(self):\n return True\n\n def has_validation_docs(self):\n return True\n\n def has_test_docs(self):\n return False\n\n def training_docs(self):\n if self._training_docs is None:\n self._training_docs = list(self.dataset[\"train\"])\n return self._training_docs\n\n def validation_docs(self):\n return self.dataset[\"validation\"]\n\n def doc_to_text(self, doc):\n return self.partial_context(doc, doc[\"option\" + doc[\"answer\"]])\n\n def should_decontaminate(self):\n return True\n\n def doc_to_decontamination_query(self, doc):\n return doc[\"sentence\"]\n\n @classmethod\n def partial_context(cls, doc, option):\n # Substitute the pronoun in the sentence with the specified option\n # and ignore everything after.\n pronoun_loc = doc[\"sentence\"].index(\"_\")\n return doc[\"sentence\"][:pronoun_loc] + option\n\n def doc_to_target(self, doc):\n return self.partial_target(doc)\n\n @classmethod\n def partial_target(cls, doc):\n # The target is everything after the document specified pronoun.\n pronoun_loc = doc[\"sentence\"].index(\"_\") + 1\n return \" \" + doc[\"sentence\"][pronoun_loc:].strip()\n\n def construct_requests(self, doc, ctx):\n \"\"\"Uses RequestFactory to construct Requests and returns an iterable of\n Requests which will be sent to the LM.\n\n :param doc:\n The document as returned from training_docs, validation_docs, or test_docs.\n :param ctx: str\n The context string, generated by fewshot_context. 
This includes the natural\n language description, as well as the few shot examples, and the question\n part of the document for `doc`.\n \"\"\"\n target = self.partial_target(doc)\n lls = []\n for option in [doc[\"option1\"], doc[\"option2\"]]:\n partial_ctx = self.partial_context(doc, option)\n full_ctx = self.append_context(ctx, partial_ctx)\n lls.append(rf.loglikelihood(full_ctx, target)[0])\n return lls\n\n @classmethod\n def append_context(cls, ctx, partial_ctx):\n ctx = ctx.split(\"\\n\\n\") # Each fewshot context is on its own new line.\n ctx.pop() # Remove the correct context put in by `doc_to_text`.\n return \"\\n\\n\".join([*ctx, partial_ctx]) if ctx else partial_ctx\n\n def process_results(self, doc, results):\n \"\"\"Take a single document and the LM results and evaluates, returning a\n dict where keys are the names of submetrics and values are the values of\n the metric for that one document\n\n :param doc:\n The document as returned from training_docs, validation_docs, or test_docs.\n :param results:\n The results of the requests created in construct_requests.\n \"\"\"\n return {\"acc\": np.argmax(results) == self.answer_to_num[doc[\"answer\"]]}\n\n def aggregation(self):\n \"\"\"\n :returns: {str: [float] -> float}\n A dictionary where keys are the names of submetrics and values are\n functions that aggregate a list of metrics\n \"\"\"\n return {\"acc\": mean}\n\n def higher_is_better(self):\n \"\"\"\n :returns: {str: bool}\n A dictionary where keys are the names of submetrics and values are\n whether a higher value of the submetric is better\n \"\"\"\n return {\"acc\": True}\n" ]
[ [ "numpy.argmax" ] ]
zehao99/CEIT
[ "06f5a409a93073bb7cfd22afb3a39f500e5a24d8" ]
[ "CEIT/models/mesh.py" ]
[ "from ..readmesh import read_mesh_from_csv\nfrom ..util.utilities import get_config, PointStack, Comp, quicksort\nimport numpy as np\n\n\nclass MeshObj(object):\n \"\"\"\n Mesh object for Calculation\n \"\"\"\n\n def __init__(self, mesh_obj=None, electrode_num=None, electrode_center_list=None, electrode_radius=None):\n \"\"\"\n Initialize the mesh object, it will generate the parameter if it's not given\n Args:\n mesh_obj: see readmesh.py\n electrode_num: electrode number\n electrode_center_list: electrode center point list\n electrode_radius: electrode radius(half side length)\n \"\"\"\n self.electrode_mesh = dict()\n if mesh_obj is None or electrode_num is None or electrode_center_list is None or electrode_radius is None:\n mesh_obj, electrode_num, electrode_center_list, electrode_radius = read_mesh_from_csv().return_mesh()\n self.config = get_config()\n self.mesh_obj = mesh_obj\n self.electrode_num = electrode_num\n self.electrode_center_list = electrode_center_list\n self.electrode_radius = electrode_radius\n self.nodes = self.mesh_obj[\"node\"]\n self.point_x = self.nodes[:, 0]\n self.point_y = self.nodes[:, 1]\n self.elem = self.mesh_obj[\"element\"]\n self.elem_perm = self.mesh_obj[\"perm\"]\n self.detection_index = np.zeros((len(self.elem)))\n self.detection_elem = np.copy(self.elem)\n self.elem_param = np.zeros((np.shape(self.elem)[0], 9))\n for i in range(self.electrode_num):\n self.electrode_mesh[i] = list()\n self.initialize_parameters()\n self.calc_detection_elements()\n\n def initialize_parameters(self):\n \"\"\"\n Update parameters for each element,\n\n Parameters used for calculating sparse matrix\n Calculate electrodes' mesh area\n initialize all electrode\n \"\"\"\n x = [.0, .0, .0]\n b = [.0, .0, .0]\n c = [.0, .0, .0]\n y = [.0, .0, .0]\n count = 0\n for element in self.elem:\n\n # change to counter clockwise\n for i in range(3):\n x[i] = self.nodes[element[i], 0]\n y[i] = self.nodes[element[i], 1]\n parameter_mat = np.array([x, y, [1, 1, 1]])\n parameter_mat = parameter_mat.T # Trans to vertical\n area = np.abs(np.linalg.det(parameter_mat) / 2)\n # get interpolation parameters\n parameter_mat = np.linalg.inv(parameter_mat)\n parameter_mat = parameter_mat.T\n b = list(parameter_mat[:, 0])\n c = list(parameter_mat[:, 1])\n x_average = np.mean(x) # get center point coordinate\n y_average = np.mean(y)\n self.elem_param[count] = [area, b[0], b[1],\n b[2], c[0], c[1], c[2], x_average, y_average]\n count += 1\n for i in range(self.electrode_num):\n center = self.electrode_center_list[i]\n self.calc_electrode_elements(i, center, self.electrode_radius)\n\n def scale_mesh(self, scale_factor):\n \"\"\"\n Scale the mesh with the given scale_factor.\n\n new_length = old_length * scale_factor\n\n Args:\n scale_factor: factor used to multiply\n \"\"\"\n self.nodes *= scale_factor\n self.electrode_radius *= scale_factor\n self.electrode_center_list = list(np.array(self.electrode_center_list) * scale_factor)\n\n def calc_electrode_elements(self, electrode_number, center, radius):\n \"\"\"\n Get the electrode element sets for every electrode,\n\n According to the SQUARE area given and put values into electrode_mesh dict\n\n Args:\n electrode_number: INT current electrode number\n center: [FLOAT,FLOAT] center of electrode\n radius: FLOAT half side length of electrode\n \"\"\"\n if electrode_number >= self.electrode_num:\n raise Exception(\"the input number exceeded electrode numbers\")\n else:\n center_x, center_y = center\n count = 0\n for i, x in enumerate(self.elem_param[:, 7]):\n 
if (center_x + radius) >= x >= (center_x - radius) and (\n center_y + radius) >= self.elem_param[i][8] >= (center_y - radius):\n self.electrode_mesh[electrode_number].append(i)\n count += 1\n if count == 0:\n raise Exception(\n \"No element is selected, please check the input\")\n\n def return_mesh(self):\n \"\"\"\n Return the mesh dict\n\n Returns:\n dict: mesh info.\n \"\"\"\n return self.mesh_obj\n\n def return_electrode_info(self):\n return self.electrode_center_list, self.electrode_radius\n\n def calc_detection_elements(self):\n \"\"\"\n Get elements whose center is inside detection range and not inside electrode,\n \"\"\"\n original_element = self.elem\n corres_index = []\n new_elem = []\n # Flatten out the mesh inside electrode\n flattened_electrode_elem = set()\n for elems in self.electrode_mesh.values():\n for elem in elems:\n flattened_electrode_elem.add(elem)\n for i, element in enumerate(original_element):\n x_val = 0\n y_val = 0\n for idx in element:\n x_val += self.nodes[idx][0]\n y_val += self.nodes[idx][1]\n x_val /= 3\n y_val /= 3\n # filter out mesh outside detection range and on the electrodes.\n # if i not in flattened_electrode_elem and np.abs(x_val) < \\\n # self.config[\"detection_bound\"] and np.abs(y_val) < self.config[\"detection_bound\"]:\n if np.abs(x_val) < self.config[\"detection_bound\"] and np.abs(y_val) < \\\n self.config[\"detection_bound\"]:\n corres_index.append(i)\n new_elem.append(element)\n self.detection_index = np.array(corres_index)\n self.detection_elem = np.array(new_elem)\n\n def delete_outside_detect(self, list_c):\n \"\"\"\n Delete the elements inside a list corresponding to the elements inside detection\n\n Args:\n list_c : an all-element-wise list\n\n Returns:\n elements remained in detection domain\n \"\"\"\n list_c = np.array(list_c)\n if list_c.ndim > 1:\n new_list_c = np.zeros(\n (self.detection_index.shape[0], list_c.shape[1]))\n for i, j in enumerate(self.detection_index):\n new_list_c[i] = list_c[j]\n return new_list_c\n elif list_c.ndim == 1:\n new_list_c = np.zeros((self.detection_index.shape[0]))\n for i, j in enumerate(self.detection_index):\n new_list_c[i] = list_c[j]\n return new_list_c\n else:\n raise Exception(\"Transfer Shape Not Correct\")\n\n def change_conductivity(self, element_list, resistance_list):\n \"\"\"\n Change conductivity in certain area according to ELEMENT NUMBER\n\n Args:\n element_list: INT LIST element numbers to be changed\n resistance_list: FLOAT LIST same dimension list for conductivity on each element included\n \"\"\"\n if len(element_list) == len(resistance_list):\n for i, ele_num in enumerate(element_list):\n if ele_num > len(self.elem):\n raise Exception(\"Element number exceeds limit\")\n self.elem_perm[ele_num] = resistance_list[i]\n else:\n raise Exception('The length of element doesn\\'t match the length of variable')\n\n def get_perimeter(self):\n \"\"\"\n Get the perimeter of the mesh, return in idx sequence.\n\n This function get the Convex Hull of the mesh and return the chain of node index list of the hull.\n\n Returns:\n node index list on perimeter in CCW.\n \"\"\"\n # get start point\n start_point = 0\n min_y = 100000000\n x = 100000000\n for i, node in enumerate(self.nodes):\n if node[1] < min_y:\n start_point = i\n min_y = node[1]\n x = node[0]\n elif node[1] == min_y:\n if node[0] < x:\n start_point = i\n x = node[0]\n # sort the rest points\n nodes = np.zeros((self.nodes.shape[0] - 1, self.nodes.shape[1] + 1))\n pointer = 0\n for i, node in enumerate(self.nodes):\n if i != 
start_point:\n nodes[pointer] = np.array([node[0], node[1], i])\n pointer += 1\n comp = Comp(self.nodes[start_point])\n quicksort(nodes, comp)\n # construct perimeter\n stack = PointStack()\n orig = np.array([self.nodes[start_point][0], self.nodes[start_point][1], start_point])\n stack.push(orig)\n stack.push(nodes[0])\n stack.push(nodes[1])\n for i, node in enumerate(nodes):\n if i < 2:\n continue\n while len(stack) > 1 and ccw(stack.next_to_top(), stack.peek(), node) in [-1, 0]:\n a = stack.next_to_top()\n b = stack.peek()\n c = node\n ans = ccw(a, b, c)\n stack.pop()\n stack.push(node)\n # prepare return array\n ans = np.zeros((len(stack)), dtype=np.int)\n pointer = len(stack) - 1\n while not stack.isEmpty():\n idx = int(stack.pop()[2])\n ans[pointer] = idx\n pointer -= 1\n return ans\n\n\ndef ccw(p0, p1, p2):\n \"\"\"\n Judge whether p0p2 vector is ccw to p0p1 vector.\n\n Return value map: \\n\n 1: p0p2 is ccw to p0p1 (angle to x axis bigger) \\n\n 0: p0p2 and p0p1 on a same line \\n\n -1: p0p2 is cw to p0p1 (angle to x axis smaller) \\n\n\n Args:\n p0: base point index 0 and 1 is x and y value. [x, y, ...]\n p1: first point index 0 and 1 is x and y value. [x, y, ...]\n p2: second point index 0 and 1 is x and y value. [x, y, ...]\n\n Returns:\n int: judgement value -1 or 0 or 1\n \"\"\"\n comp = Comp(p0)\n return comp.compare_angle(p2, p1)\n" ]
[ [ "numpy.abs", "numpy.linalg.inv", "numpy.linalg.det", "numpy.copy", "numpy.mean", "numpy.shape", "numpy.array", "numpy.zeros" ] ]
NolanYuu/espnet
[ "73c43ac034d7a44a3bf1a10dc4079884df57de43" ]
[ "espnet2/tasks/abs_task.py" ]
[ "from abc import ABC\nfrom abc import abstractmethod\nimport argparse\nfrom dataclasses import dataclass\nfrom distutils.version import LooseVersion\nimport functools\nimport logging\nimport os\nfrom pathlib import Path\nimport sys\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nimport humanfriendly\nimport numpy as np\nimport torch\nimport torch.multiprocessing\nimport torch.nn\nimport torch.optim\nfrom torch.utils.data import DataLoader\nfrom typeguard import check_argument_types\nfrom typeguard import check_return_type\nimport wandb\nimport yaml\n\nfrom espnet import __version__\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet2.iterators.abs_iter_factory import AbsIterFactory\nfrom espnet2.iterators.chunk_iter_factory import ChunkIterFactory\nfrom espnet2.iterators.multiple_iter_factory import MultipleIterFactory\nfrom espnet2.iterators.sequence_iter_factory import SequenceIterFactory\nfrom espnet2.main_funcs.collect_stats import collect_stats\nfrom espnet2.optimizers.sgd import SGD\nfrom espnet2.samplers.build_batch_sampler import BATCH_TYPES\nfrom espnet2.samplers.build_batch_sampler import build_batch_sampler\nfrom espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler\nfrom espnet2.schedulers.noam_lr import NoamLR\nfrom espnet2.schedulers.warmup_lr import WarmupLR\nfrom espnet2.torch_utils.load_pretrained_model import load_pretrained_model\nfrom espnet2.torch_utils.model_summary import model_summary\nfrom espnet2.torch_utils.pytorch_version import pytorch_cudnn_version\nfrom espnet2.torch_utils.set_all_random_seed import set_all_random_seed\nfrom espnet2.train.abs_espnet_model import AbsESPnetModel\nfrom espnet2.train.class_choices import ClassChoices\nfrom espnet2.train.dataset import AbsDataset\nfrom espnet2.train.dataset import DATA_TYPES\nfrom espnet2.train.dataset import ESPnetDataset\nfrom espnet2.train.distributed_utils import DistributedOption\nfrom espnet2.train.distributed_utils import free_port\nfrom espnet2.train.distributed_utils import get_master_port\nfrom espnet2.train.distributed_utils import get_node_rank\nfrom espnet2.train.distributed_utils import get_num_nodes\nfrom espnet2.train.distributed_utils import resolve_distributed_mode\nfrom espnet2.train.iterable_dataset import IterableESPnetDataset\nfrom espnet2.train.trainer import Trainer\nfrom espnet2.utils.build_dataclass import build_dataclass\nfrom espnet2.utils import config_argparse\nfrom espnet2.utils.get_default_kwargs import get_default_kwargs\nfrom espnet2.utils.nested_dict_action import NestedDictAction\nfrom espnet2.utils.types import humanfriendly_parse_size_or_none\nfrom espnet2.utils.types import int_or_none\nfrom espnet2.utils.types import str2bool\nfrom espnet2.utils.types import str2triple_str\nfrom espnet2.utils.types import str_or_int\nfrom espnet2.utils.types import str_or_none\nfrom espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump\n\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n from torch.multiprocessing.spawn import ProcessContext\nelse:\n from torch.multiprocessing.spawn import SpawnContext as ProcessContext\n\n\noptim_classes = dict(\n adam=torch.optim.Adam,\n sgd=SGD,\n adadelta=torch.optim.Adadelta,\n adagrad=torch.optim.Adagrad,\n adamax=torch.optim.Adamax,\n asgd=torch.optim.ASGD,\n lbfgs=torch.optim.LBFGS,\n rmsprop=torch.optim.RMSprop,\n 
rprop=torch.optim.Rprop,\n)\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.2.0\"):\n optim_classes[\"adamw\"] = torch.optim.AdamW\ntry:\n import torch_optimizer\n\n optim_classes.update(\n accagd=torch_optimizer.AccSGD,\n adabound=torch_optimizer.AdaBound,\n adamod=torch_optimizer.AdaMod,\n diffgrad=torch_optimizer.DiffGrad,\n lamb=torch_optimizer.Lamb,\n novograd=torch_optimizer.NovoGrad,\n pid=torch_optimizer.PID,\n # torch_optimizer<=0.0.1a10 doesn't support\n # qhadam=torch_optimizer.QHAdam,\n qhm=torch_optimizer.QHM,\n radam=torch_optimizer.RAdam,\n sgdw=torch_optimizer.SGDW,\n yogi=torch_optimizer.Yogi,\n )\n del torch_optimizer\nexcept ImportError:\n pass\ntry:\n import apex\n\n optim_classes.update(\n fusedadam=apex.optimizers.FusedAdam,\n fusedlamb=apex.optimizers.FusedLAMB,\n fusednovograd=apex.optimizers.FusedNovoGrad,\n fusedsgd=apex.optimizers.FusedSGD,\n )\n del apex\nexcept ImportError:\n pass\ntry:\n import fairscale\nexcept ImportError:\n fairscale = None\n\n\nscheduler_classes = dict(\n ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,\n lambdalr=torch.optim.lr_scheduler.LambdaLR,\n steplr=torch.optim.lr_scheduler.StepLR,\n multisteplr=torch.optim.lr_scheduler.MultiStepLR,\n exponentiallr=torch.optim.lr_scheduler.ExponentialLR,\n CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,\n)\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.1.0\"):\n scheduler_classes.update(\n noamlr=NoamLR,\n warmuplr=WarmupLR,\n )\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.3.0\"):\n CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts\n scheduler_classes.update(\n cycliclr=torch.optim.lr_scheduler.CyclicLR,\n onecyclelr=torch.optim.lr_scheduler.OneCycleLR,\n CosineAnnealingWarmRestarts=CosineAnnealingWarmRestarts,\n )\n# To lower keys\noptim_classes = {k.lower(): v for k, v in optim_classes.items()}\nscheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}\n\n\n@dataclass\nclass IteratorOptions:\n preprocess_fn: callable\n collate_fn: callable\n data_path_and_name_and_type: list\n shape_files: list\n batch_size: int\n batch_bins: int\n batch_type: str\n max_cache_size: float\n max_cache_fd: int\n distributed: bool\n num_batches: Optional[int]\n num_iters_per_epoch: Optional[int]\n train: bool\n\n\nclass AbsTask(ABC):\n # Use @staticmethod, or @classmethod,\n # instead of instance method to avoid God classes\n\n # If you need more than one optimizers, change this value in inheritance\n num_optimizers: int = 1\n trainer = Trainer\n class_choices_list: List[ClassChoices] = []\n\n def __init__(self):\n raise RuntimeError(\"This class can't be instantiated.\")\n\n @classmethod\n @abstractmethod\n def add_task_arguments(cls, parser: argparse.ArgumentParser):\n pass\n\n @classmethod\n @abstractmethod\n def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n \"\"\"Return \"collate_fn\", which is a callable object and given to DataLoader.\n\n >>> from torch.utils.data import DataLoader\n >>> loader = DataLoader(collate_fn=cls.build_collate_fn(args, train=True), ...)\n\n In many cases, you can use our common collate_fn.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def build_preprocess_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def 
required_data_names(\n cls, train: bool = True, inference: bool = False\n ) -> Tuple[str, ...]:\n \"\"\"Define the required names by Task\n\n This function is used by\n >>> cls.check_task_requirements()\n If your model is defined as following,\n\n >>> from espnet2.train.abs_espnet_model import AbsESPnetModel\n >>> class Model(AbsESPnetModel):\n ... def forward(self, input, output, opt=None): pass\n\n then \"required_data_names\" should be as\n\n >>> required_data_names = ('input', 'output')\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def optional_data_names(\n cls, train: bool = True, inference: bool = False\n ) -> Tuple[str, ...]:\n \"\"\"Define the optional names by Task\n\n This function is used by\n >>> cls.check_task_requirements()\n If your model is defined as follows,\n\n >>> from espnet2.train.abs_espnet_model import AbsESPnetModel\n >>> class Model(AbsESPnetModel):\n ... def forward(self, input, output, opt=None): pass\n\n then \"optional_data_names\" should be as\n\n >>> optional_data_names = ('opt',)\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel:\n raise NotImplementedError\n\n @classmethod\n def get_parser(cls) -> config_argparse.ArgumentParser:\n assert check_argument_types()\n\n class ArgumentDefaultsRawTextHelpFormatter(\n argparse.RawTextHelpFormatter,\n argparse.ArgumentDefaultsHelpFormatter,\n ):\n pass\n\n parser = config_argparse.ArgumentParser(\n description=\"base parser\",\n formatter_class=ArgumentDefaultsRawTextHelpFormatter,\n )\n\n # NOTE(kamo): Use '_' instead of '-' to avoid confusion.\n # I think '-' looks really confusing if it's written in yaml.\n\n # NOTE(kamo): add_arguments(..., required=True) can't be used\n # to provide --print_config mode. Instead of it, do as\n parser.set_defaults(required=[\"output_dir\"])\n\n group = parser.add_argument_group(\"Common configuration\")\n\n group.add_argument(\n \"--print_config\",\n action=\"store_true\",\n help=\"Print the config file and exit\",\n )\n group.add_argument(\n \"--log_level\",\n type=lambda x: x.upper(),\n default=\"INFO\",\n choices=(\"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"),\n help=\"The verbose level of logging\",\n )\n group.add_argument(\n \"--dry_run\",\n type=str2bool,\n default=False,\n help=\"Perform process without training\",\n )\n group.add_argument(\n \"--iterator_type\",\n type=str,\n choices=[\"sequence\", \"chunk\", \"task\", \"none\"],\n default=\"sequence\",\n help=\"Specify iterator type\",\n )\n\n group.add_argument(\"--output_dir\", type=str_or_none, default=None)\n group.add_argument(\n \"--ngpu\",\n type=int,\n default=0,\n help=\"The number of gpus. 0 indicates CPU mode\",\n )\n group.add_argument(\"--seed\", type=int, default=0, help=\"Random seed\")\n group.add_argument(\n \"--num_workers\",\n type=int,\n default=1,\n help=\"The number of workers used for DataLoader\",\n )\n group.add_argument(\n \"--num_att_plot\",\n type=int,\n default=3,\n help=\"The number images to plot the outputs from attention. 
\"\n \"This option makes sense only when attention-based model\",\n )\n\n group = parser.add_argument_group(\"distributed training related\")\n group.add_argument(\n \"--dist_backend\",\n default=\"nccl\",\n type=str,\n help=\"distributed backend\",\n )\n group.add_argument(\n \"--dist_init_method\",\n type=str,\n default=\"env://\",\n help='if init_method=\"env://\", env values of \"MASTER_PORT\", \"MASTER_ADDR\", '\n '\"WORLD_SIZE\", and \"RANK\" are referred.',\n )\n group.add_argument(\n \"--dist_world_size\",\n default=None,\n type=int_or_none,\n help=\"number of nodes for distributed training\",\n )\n group.add_argument(\n \"--dist_rank\",\n type=int_or_none,\n default=None,\n help=\"node rank for distributed training\",\n )\n group.add_argument(\n # Not starting with \"dist_\" for compatibility to launch.py\n \"--local_rank\",\n type=int_or_none,\n default=None,\n help=\"local rank for distributed training. This option is used if \"\n \"--multiprocessing_distributed=false\",\n )\n group.add_argument(\n \"--dist_master_addr\",\n default=None,\n type=str_or_none,\n help=\"The master address for distributed training. \"\n \"This value is used when dist_init_method == 'env://'\",\n )\n group.add_argument(\n \"--dist_master_port\",\n default=None,\n type=int_or_none,\n help=\"The master port for distributed training\"\n \"This value is used when dist_init_method == 'env://'\",\n )\n group.add_argument(\n \"--dist_launcher\",\n default=None,\n type=str_or_none,\n choices=[\"slurm\", \"mpi\", None],\n help=\"The launcher type for distributed training\",\n )\n group.add_argument(\n \"--multiprocessing_distributed\",\n default=False,\n type=str2bool,\n help=\"Use multi-processing distributed training to launch \"\n \"N processes per node, which has N GPUs. 
This is the \"\n \"fastest way to use PyTorch for either single node or \"\n \"multi node data parallel training\",\n )\n group.add_argument(\n \"--unused_parameters\",\n type=str2bool,\n default=False,\n help=\"Whether to use the find_unused_parameters in \"\n \"torch.nn.parallel.DistributedDataParallel \",\n )\n group.add_argument(\n \"--sharded_ddp\",\n default=False,\n type=str2bool,\n help=\"Enable sharded training provided by fairscale\",\n )\n\n group = parser.add_argument_group(\"cudnn mode related\")\n group.add_argument(\n \"--cudnn_enabled\",\n type=str2bool,\n default=torch.backends.cudnn.enabled,\n help=\"Enable CUDNN\",\n )\n group.add_argument(\n \"--cudnn_benchmark\",\n type=str2bool,\n default=torch.backends.cudnn.benchmark,\n help=\"Enable cudnn-benchmark mode\",\n )\n group.add_argument(\n \"--cudnn_deterministic\",\n type=str2bool,\n default=True,\n help=\"Enable cudnn-deterministic mode\",\n )\n\n group = parser.add_argument_group(\"collect stats mode related\")\n group.add_argument(\n \"--collect_stats\",\n type=str2bool,\n default=False,\n help='Perform on \"collect stats\" mode',\n )\n group.add_argument(\n \"--write_collected_feats\",\n type=str2bool,\n default=False,\n help='Write the output features from the model when \"collect stats\" mode',\n )\n\n group = parser.add_argument_group(\"Trainer related\")\n group.add_argument(\n \"--max_epoch\",\n type=int,\n default=40,\n help=\"The maximum number epoch to train\",\n )\n group.add_argument(\n \"--patience\",\n type=int_or_none,\n default=None,\n help=\"Number of epochs to wait without improvement \"\n \"before stopping the training\",\n )\n group.add_argument(\n \"--val_scheduler_criterion\",\n type=str,\n nargs=2,\n default=(\"valid\", \"loss\"),\n help=\"The criterion used for the value given to the lr scheduler. \"\n 'Give a pair referring the phase, \"train\" or \"valid\",'\n 'and the criterion name. The mode specifying \"min\" or \"max\" can '\n \"be changed by --scheduler_conf\",\n )\n group.add_argument(\n \"--early_stopping_criterion\",\n type=str,\n nargs=3,\n default=(\"valid\", \"loss\", \"min\"),\n help=\"The criterion used for judging of early stopping. \"\n 'Give a pair referring the phase, \"train\" or \"valid\",'\n 'the criterion name and the mode, \"min\" or \"max\", e.g. \"acc,max\".',\n )\n group.add_argument(\n \"--best_model_criterion\",\n type=str2triple_str,\n nargs=\"+\",\n default=[\n (\"train\", \"loss\", \"min\"),\n (\"valid\", \"loss\", \"min\"),\n (\"train\", \"acc\", \"max\"),\n (\"valid\", \"acc\", \"max\"),\n ],\n help=\"The criterion used for judging of the best model. \"\n 'Give a pair referring the phase, \"train\" or \"valid\",'\n 'the criterion name, and the mode, \"min\" or \"max\", e.g. \"acc,max\".',\n )\n group.add_argument(\n \"--keep_nbest_models\",\n type=int,\n nargs=\"+\",\n default=[10],\n help=\"Remove previous snapshots excluding the n-best scored epochs\",\n )\n group.add_argument(\n \"--grad_clip\",\n type=float,\n default=5.0,\n help=\"Gradient norm threshold to clip\",\n )\n group.add_argument(\n \"--grad_clip_type\",\n type=float,\n default=2.0,\n help=\"The type of the used p-norm for gradient clip. 
Can be inf\",\n )\n group.add_argument(\n \"--grad_noise\",\n type=str2bool,\n default=False,\n help=\"The flag to switch to use noise injection to \"\n \"gradients during training\",\n )\n group.add_argument(\n \"--accum_grad\",\n type=int,\n default=1,\n help=\"The number of gradient accumulation\",\n )\n group.add_argument(\n \"--no_forward_run\",\n type=str2bool,\n default=False,\n help=\"Just only iterating data loading without \"\n \"model forwarding and training\",\n )\n group.add_argument(\n \"--resume\",\n type=str2bool,\n default=False,\n help=\"Enable resuming if checkpoint is existing\",\n )\n group.add_argument(\n \"--train_dtype\",\n default=\"float32\",\n choices=[\"float16\", \"float32\", \"float64\"],\n help=\"Data type for training.\",\n )\n group.add_argument(\n \"--use_amp\",\n type=str2bool,\n default=False,\n help=\"Enable Automatic Mixed Precision. This feature requires pytorch>=1.6\",\n )\n group.add_argument(\n \"--log_interval\",\n type=int_or_none,\n default=None,\n help=\"Show the logs every the number iterations in each epochs at the \"\n \"training phase. If None is given, it is decided according the number \"\n \"of training samples automatically .\",\n )\n group.add_argument(\n \"--use_tensorboard\",\n type=str2bool,\n default=True,\n help=\"Enable tensorboard logging\",\n )\n group.add_argument(\n \"--use_wandb\",\n type=str2bool,\n default=False,\n help=\"Enable wandb logging\",\n )\n group.add_argument(\n \"--wandb_project\",\n type=str,\n default=None,\n help=\"Specify wandb project\",\n )\n group.add_argument(\n \"--wandb_id\",\n type=str,\n default=None,\n help=\"Specify wandb id\",\n )\n group.add_argument(\n \"--detect_anomaly\",\n type=str2bool,\n default=False,\n help=\"Set torch.autograd.set_detect_anomaly\",\n )\n\n group = parser.add_argument_group(\"Pretraining model related\")\n group.add_argument(\"--pretrain_path\", help=\"This option is obsoleted\")\n group.add_argument(\n \"--init_param\",\n type=str,\n default=[],\n nargs=\"*\",\n help=\"Specify the file path used for initialization of parameters. \"\n \"The format is '<file_path>:<src_key>:<dst_key>:<exclude_keys>', \"\n \"where file_path is the model file path, \"\n \"src_key specifies the key of model states to be used in the model file, \"\n \"dst_key specifies the attribute of the model to be initialized, \"\n \"and exclude_keys excludes keys of model states for the initialization.\"\n \"e.g.\\n\"\n \" # Load all parameters\"\n \" --init_param some/where/model.pth\\n\"\n \" # Load only decoder parameters\"\n \" --init_param some/where/model.pth:decoder:decoder\\n\"\n \" # Load only decoder parameters excluding decoder.embed\"\n \" --init_param some/where/model.pth:decoder:decoder:decoder.embed\\n\"\n \" --init_param some/where/model.pth:decoder:decoder:decoder.embed\\n\",\n )\n group.add_argument(\n \"--ignore_init_mismatch\",\n type=str2bool,\n default=False,\n help=\"Ignore size mismatch when loading pre-trained model\",\n )\n group.add_argument(\n \"--freeze_param\",\n type=str,\n default=[],\n nargs=\"*\",\n help=\"Freeze parameters\",\n )\n\n group = parser.add_argument_group(\"BatchSampler related\")\n group.add_argument(\n \"--num_iters_per_epoch\",\n type=int_or_none,\n default=None,\n help=\"Restrict the number of iterations for training per epoch\",\n )\n group.add_argument(\n \"--batch_size\",\n type=int,\n default=20,\n help=\"The mini-batch size used for training. 
Used if batch_type='unsorted',\"\n \" 'sorted', or 'folded'.\",\n )\n group.add_argument(\n \"--valid_batch_size\",\n type=int_or_none,\n default=None,\n help=\"If not given, the value of --batch_size is used\",\n )\n group.add_argument(\n \"--batch_bins\",\n type=int,\n default=1000000,\n help=\"The number of batch bins. Used if batch_type='length' or 'numel'\",\n )\n group.add_argument(\n \"--valid_batch_bins\",\n type=int_or_none,\n default=None,\n help=\"If not given, the value of --batch_bins is used\",\n )\n\n group.add_argument(\"--train_shape_file\", type=str, action=\"append\", default=[])\n group.add_argument(\"--valid_shape_file\", type=str, action=\"append\", default=[])\n\n group = parser.add_argument_group(\"Sequence iterator related\")\n _batch_type_help = \"\"\n for key, value in BATCH_TYPES.items():\n _batch_type_help += f'\"{key}\":\\n{value}\\n'\n group.add_argument(\n \"--batch_type\",\n type=str,\n default=\"folded\",\n choices=list(BATCH_TYPES),\n help=_batch_type_help,\n )\n group.add_argument(\n \"--valid_batch_type\",\n type=str_or_none,\n default=None,\n choices=list(BATCH_TYPES) + [None],\n help=\"If not given, the value of --batch_type is used\",\n )\n group.add_argument(\"--fold_length\", type=int, action=\"append\", default=[])\n group.add_argument(\n \"--sort_in_batch\",\n type=str,\n default=\"descending\",\n choices=[\"descending\", \"ascending\"],\n help=\"Sort the samples in each mini-batches by the sample \"\n 'lengths. To enable this, \"shape_file\" must have the length information.',\n )\n group.add_argument(\n \"--sort_batch\",\n type=str,\n default=\"descending\",\n choices=[\"descending\", \"ascending\"],\n help=\"Sort mini-batches by the sample lengths\",\n )\n group.add_argument(\n \"--multiple_iterator\",\n type=str2bool,\n default=False,\n help=\"Use multiple iterator mode\",\n )\n\n group = parser.add_argument_group(\"Chunk iterator related\")\n group.add_argument(\n \"--chunk_length\",\n type=str_or_int,\n default=500,\n help=\"Specify chunk length. e.g. '300', '300,400,500', or '300-400'.\"\n \"If multiple numbers separated by command are given, \"\n \"one of them is selected randomly for each samples. \"\n \"If two numbers are given with '-', it indicates the range of the choices. \"\n \"Note that if the sequence length is shorter than the all chunk_lengths, \"\n \"the sample is discarded. \",\n )\n group.add_argument(\n \"--chunk_shift_ratio\",\n type=float,\n default=0.5,\n help=\"Specify the shift width of chunks. If it's less than 1, \"\n \"allows the overlapping and if bigger than 1, there are some gaps \"\n \"between each chunk.\",\n )\n group.add_argument(\n \"--num_cache_chunks\",\n type=int,\n default=1024,\n help=\"Shuffle in the specified number of chunks and generate mini-batches \"\n \"More larger this value, more randomness can be obtained.\",\n )\n\n group = parser.add_argument_group(\"Dataset related\")\n _data_path_and_name_and_type_help = (\n \"Give three words splitted by comma. It's used for the training data. \"\n \"e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. \"\n \"The first value, some/path/a.scp, indicates the file path, \"\n \"and the second, foo, is the key name used for the mini-batch data, \"\n \"and the last, sound, decides the file type. \"\n \"This option is repeatable, so you can input any number of features \"\n \"for your task. 
Supported file types are as follows:\\n\\n\"\n )\n for key, dic in DATA_TYPES.items():\n _data_path_and_name_and_type_help += f'\"{key}\":\\n{dic[\"help\"]}\\n\\n'\n\n group.add_argument(\n \"--train_data_path_and_name_and_type\",\n type=str2triple_str,\n action=\"append\",\n default=[],\n help=_data_path_and_name_and_type_help,\n )\n group.add_argument(\n \"--valid_data_path_and_name_and_type\",\n type=str2triple_str,\n action=\"append\",\n default=[],\n )\n group.add_argument(\n \"--allow_variable_data_keys\",\n type=str2bool,\n default=False,\n help=\"Allow the arbitrary keys for mini-batch with ignoring \"\n \"the task requirements\",\n )\n group.add_argument(\n \"--max_cache_size\",\n type=humanfriendly.parse_size,\n default=0.0,\n help=\"The maximum cache size for data loader. e.g. 10MB, 20GB.\",\n )\n group.add_argument(\n \"--max_cache_fd\",\n type=int,\n default=32,\n help=\"The maximum number of file descriptors to be kept \"\n \"as opened for ark files. \"\n \"This feature is only valid when data type is 'kaldi_ark'.\",\n )\n group.add_argument(\n \"--valid_max_cache_size\",\n type=humanfriendly_parse_size_or_none,\n default=None,\n help=\"The maximum cache size for validation data loader. e.g. 10MB, 20GB. \"\n \"If None, the 5 percent size of --max_cache_size\",\n )\n\n group = parser.add_argument_group(\"Optimizer related\")\n for i in range(1, cls.num_optimizers + 1):\n suf = \"\" if i == 1 else str(i)\n group.add_argument(\n f\"--optim{suf}\",\n type=lambda x: x.lower(),\n default=\"adadelta\",\n choices=list(optim_classes),\n help=\"The optimizer type\",\n )\n group.add_argument(\n f\"--optim{suf}_conf\",\n action=NestedDictAction,\n default=dict(),\n help=\"The keyword arguments for optimizer\",\n )\n group.add_argument(\n f\"--scheduler{suf}\",\n type=lambda x: str_or_none(x.lower()),\n default=None,\n choices=list(scheduler_classes) + [None],\n help=\"The lr scheduler type\",\n )\n group.add_argument(\n f\"--scheduler{suf}_conf\",\n action=NestedDictAction,\n default=dict(),\n help=\"The keyword arguments for lr scheduler\",\n )\n\n cls.trainer.add_arguments(parser)\n cls.add_task_arguments(parser)\n\n assert check_return_type(parser)\n return parser\n\n @classmethod\n def build_optimizers(\n cls,\n args: argparse.Namespace,\n model: torch.nn.Module,\n ) -> List[torch.optim.Optimizer]:\n if cls.num_optimizers != 1:\n raise RuntimeError(\n \"build_optimizers() must be overridden if num_optimizers != 1\"\n )\n\n optim_class = optim_classes.get(args.optim)\n if optim_class is None:\n raise ValueError(f\"must be one of {list(optim_classes)}: {args.optim}\")\n if args.sharded_ddp:\n if fairscale is None:\n raise RuntimeError(\"Requiring fairscale. 
Do 'pip install fairscale'\")\n optim = fairscale.optim.oss.OSS(\n params=model.parameters(), optim=optim_class, **args.optim_conf\n )\n else:\n optim = optim_class(filter(lambda p: p.requires_grad, model.parameters()), **args.optim_conf)\n\n optimizers = [optim]\n return optimizers\n\n @classmethod\n def exclude_opts(cls) -> Tuple[str, ...]:\n \"\"\"The options not to be shown by --print_config\"\"\"\n return \"required\", \"print_config\", \"config\", \"ngpu\"\n\n @classmethod\n def get_default_config(cls) -> Dict[str, Any]:\n \"\"\"Return the configuration as dict.\n\n This method is used by print_config()\n \"\"\"\n\n def get_class_type(name: str, classes: dict):\n _cls = classes.get(name)\n if _cls is None:\n raise ValueError(f\"must be one of {list(classes)}: {name}\")\n return _cls\n\n # This method is used only for --print_config\n assert check_argument_types()\n parser = cls.get_parser()\n args, _ = parser.parse_known_args()\n config = vars(args)\n # Excludes the options not to be shown\n for k in AbsTask.exclude_opts():\n config.pop(k)\n\n for i in range(1, cls.num_optimizers + 1):\n suf = \"\" if i == 1 else str(i)\n name = config[f\"optim{suf}\"]\n optim_class = get_class_type(name, optim_classes)\n conf = get_default_kwargs(optim_class)\n # Overwrite the default by the arguments,\n conf.update(config[f\"optim{suf}_conf\"])\n # and set it again\n config[f\"optim{suf}_conf\"] = conf\n\n name = config[f\"scheduler{suf}\"]\n if name is not None:\n scheduler_class = get_class_type(name, scheduler_classes)\n conf = get_default_kwargs(scheduler_class)\n # Overwrite the default by the arguments,\n conf.update(config[f\"scheduler{suf}_conf\"])\n # and set it again\n config[f\"scheduler{suf}_conf\"] = conf\n\n for class_choices in cls.class_choices_list:\n if getattr(args, class_choices.name) is not None:\n class_obj = class_choices.get_class(getattr(args, class_choices.name))\n conf = get_default_kwargs(class_obj)\n name = class_choices.name\n # Overwrite the default by the arguments,\n conf.update(config[f\"{name}_conf\"])\n # and set it again\n config[f\"{name}_conf\"] = conf\n return config\n\n @classmethod\n def check_required_command_args(cls, args: argparse.Namespace):\n assert check_argument_types()\n for k in vars(args):\n if \"-\" in k:\n raise RuntimeError(f'Use \"_\" instead of \"-\": parser.get_parser(\"{k}\")')\n\n required = \", \".join(\n f\"--{a}\" for a in args.required if getattr(args, a) is None\n )\n\n if len(required) != 0:\n parser = cls.get_parser()\n parser.print_help(file=sys.stderr)\n p = Path(sys.argv[0]).name\n print(file=sys.stderr)\n print(\n f\"{p}: error: the following arguments are required: \" f\"{required}\",\n file=sys.stderr,\n )\n sys.exit(2)\n\n @classmethod\n def check_task_requirements(\n cls,\n dataset: Union[AbsDataset, IterableESPnetDataset],\n allow_variable_data_keys: bool,\n train: bool,\n inference: bool = False,\n ) -> None:\n \"\"\"Check if the dataset satisfy the requirement of current Task\"\"\"\n assert check_argument_types()\n mes = (\n f\"If you intend to use an additional input, modify \"\n f'\"{cls.__name__}.required_data_names()\" or '\n f'\"{cls.__name__}.optional_data_names()\". '\n f\"Otherwise you need to set --allow_variable_data_keys true \"\n )\n\n for k in cls.required_data_names(train, inference):\n if not dataset.has_name(k):\n raise RuntimeError(\n f'\"{cls.required_data_names(train, inference)}\" are required for'\n f' {cls.__name__}. 
but \"{dataset.names()}\" are input.\\n{mes}'\n )\n if not allow_variable_data_keys:\n task_keys = cls.required_data_names(\n train, inference\n ) + cls.optional_data_names(train, inference)\n for k in dataset.names():\n if k not in task_keys:\n raise RuntimeError(\n f\"The data-name must be one of {task_keys} \"\n f'for {cls.__name__}: \"{k}\" is not allowed.\\n{mes}'\n )\n\n @classmethod\n def print_config(cls, file=sys.stdout) -> None:\n assert check_argument_types()\n # Shows the config: e.g. python train.py asr --print_config\n config = cls.get_default_config()\n file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))\n\n @classmethod\n def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):\n assert check_argument_types()\n print(get_commandline_args(), file=sys.stderr)\n if args is None:\n parser = cls.get_parser()\n args = parser.parse_args(cmd)\n args.version = __version__\n if args.pretrain_path is not None:\n raise RuntimeError(\"--pretrain_path is deprecated. Use --init_param\")\n if args.print_config:\n cls.print_config()\n sys.exit(0)\n cls.check_required_command_args(args)\n\n # \"distributed\" is decided using the other command args\n resolve_distributed_mode(args)\n if not args.distributed or not args.multiprocessing_distributed:\n cls.main_worker(args)\n\n else:\n assert args.ngpu > 1, args.ngpu\n # Multi-processing distributed mode: e.g. 2node-4process-4GPU\n # | Host1 | Host2 |\n # | Process1 | Process2 | <= Spawn processes\n # |Child1|Child2|Child1|Child2|\n # |GPU1 |GPU2 |GPU1 |GPU2 |\n\n # See also the following usage of --multiprocessing-distributed:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)\n if num_nodes == 1:\n args.dist_master_addr = \"localhost\"\n args.dist_rank = 0\n # Single node distributed training with multi-GPUs\n if (\n args.dist_init_method == \"env://\"\n and get_master_port(args.dist_master_port) is None\n ):\n # Get the unused port\n args.dist_master_port = free_port()\n\n # Assume that nodes use same number of GPUs each other\n args.dist_world_size = args.ngpu * num_nodes\n node_rank = get_node_rank(args.dist_rank, args.dist_launcher)\n\n # The following block is copied from:\n # https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py\n error_queues = []\n processes = []\n mp = torch.multiprocessing.get_context(\"spawn\")\n for i in range(args.ngpu):\n # Copy args\n local_args = argparse.Namespace(**vars(args))\n\n local_args.local_rank = i\n local_args.dist_rank = args.ngpu * node_rank + i\n local_args.ngpu = 1\n\n process = mp.Process(\n target=cls.main_worker,\n args=(local_args,),\n daemon=False,\n )\n process.start()\n processes.append(process)\n error_queues.append(mp.SimpleQueue())\n # Loop on join until it returns True or raises an exception.\n while not ProcessContext(processes, error_queues).join():\n pass\n\n @classmethod\n def main_worker(cls, args: argparse.Namespace):\n assert check_argument_types()\n\n # 0. 
Init distributed process\n distributed_option = build_dataclass(DistributedOption, args)\n # Setting distributed_option.dist_rank, etc.\n distributed_option.init_options()\n\n # NOTE(kamo): Don't use logging before invoking logging.basicConfig()\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n if not distributed_option.distributed:\n _rank = \"\"\n else:\n _rank = (\n f\":{distributed_option.dist_rank}/\"\n f\"{distributed_option.dist_world_size}\"\n )\n\n # NOTE(kamo):\n # logging.basicConfig() is invoked in main_worker() instead of main()\n # because it can be invoked only once in a process.\n # FIXME(kamo): Should we use logging.getLogger()?\n logging.basicConfig(\n level=args.log_level,\n format=f\"[{os.uname()[1].split('.')[0]}{_rank}]\"\n f\" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n else:\n # Suppress logging if RANK != 0\n logging.basicConfig(\n level=\"ERROR\",\n format=f\"[{os.uname()[1].split('.')[0]}\"\n f\":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]\"\n f\" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n # Invoking torch.distributed.init_process_group\n distributed_option.init_torch_distributed()\n\n # 1. Set random-seed\n set_all_random_seed(args.seed)\n torch.backends.cudnn.enabled = args.cudnn_enabled\n torch.backends.cudnn.benchmark = args.cudnn_benchmark\n torch.backends.cudnn.deterministic = args.cudnn_deterministic\n if args.detect_anomaly:\n logging.info(\"Invoking torch.autograd.set_detect_anomaly(True)\")\n torch.autograd.set_detect_anomaly(args.detect_anomaly)\n\n # 2. Build model\n model = cls.build_model(args=args)\n if not isinstance(model, AbsESPnetModel):\n raise RuntimeError(\n f\"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}\"\n )\n model = model.to(\n dtype=getattr(torch, args.train_dtype),\n device=\"cuda\" if args.ngpu > 0 else \"cpu\",\n )\n for t in args.freeze_param:\n for k, p in model.named_parameters():\n if k.startswith(t + \".\") or k == t:\n logging.info(f\"Setting {k}.requires_grad = False\")\n p.requires_grad = False\n\n # 3. Build optimizer\n optimizers = cls.build_optimizers(args, model=model)\n\n # 4. Build schedulers\n schedulers = []\n for i, optim in enumerate(optimizers, 1):\n suf = \"\" if i == 1 else str(i)\n name = getattr(args, f\"scheduler{suf}\")\n conf = getattr(args, f\"scheduler{suf}_conf\")\n if name is not None:\n cls_ = scheduler_classes.get(name)\n if cls_ is None:\n raise ValueError(\n f\"must be one of {list(scheduler_classes)}: {name}\"\n )\n scheduler = cls_(optim, **conf)\n else:\n scheduler = None\n\n schedulers.append(scheduler)\n\n logging.info(pytorch_cudnn_version())\n logging.info(model_summary(model))\n for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):\n suf = \"\" if i == 1 else str(i)\n logging.info(f\"Optimizer{suf}:\\n{o}\")\n logging.info(f\"Scheduler{suf}: {s}\")\n\n # 5. Dump \"args\" to config.yaml\n # NOTE(kamo): \"args\" should be saved after object-buildings are done\n # because they are allowed to modify \"args\".\n output_dir = Path(args.output_dir)\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n output_dir.mkdir(parents=True, exist_ok=True)\n with (output_dir / \"config.yaml\").open(\"w\", encoding=\"utf-8\") as f:\n logging.info(\n f'Saving the configuration in {output_dir / \"config.yaml\"}'\n )\n yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)\n\n # 6. 
Loads pre-trained model\n for p in args.init_param:\n logging.info(f\"Loading pretrained params from {p}\")\n load_pretrained_model(\n model=model,\n init_param=p,\n ignore_init_mismatch=args.ignore_init_mismatch,\n # NOTE(kamo): \"cuda\" for torch.load always indicates cuda:0\n # in PyTorch<=1.4\n map_location=f\"cuda:{torch.cuda.current_device()}\"\n if args.ngpu > 0\n else \"cpu\",\n )\n\n if args.dry_run:\n pass\n elif args.collect_stats:\n # Perform on collect_stats mode. This mode has two roles\n # - Derive the length and dimension of all input data\n # - Accumulate feats, square values, and the length for whitening\n logging.info(args)\n\n if args.valid_batch_size is None:\n args.valid_batch_size = args.batch_size\n\n if len(args.train_shape_file) != 0:\n train_key_file = args.train_shape_file[0]\n else:\n train_key_file = None\n if len(args.valid_shape_file) != 0:\n valid_key_file = args.valid_shape_file[0]\n else:\n valid_key_file = None\n\n collect_stats(\n model=model,\n train_iter=cls.build_streaming_iterator(\n data_path_and_name_and_type=args.train_data_path_and_name_and_type,\n key_file=train_key_file,\n batch_size=args.batch_size,\n dtype=args.train_dtype,\n num_workers=args.num_workers,\n allow_variable_data_keys=args.allow_variable_data_keys,\n ngpu=args.ngpu,\n preprocess_fn=cls.build_preprocess_fn(args, train=False),\n collate_fn=cls.build_collate_fn(args, train=False),\n ),\n valid_iter=cls.build_streaming_iterator(\n data_path_and_name_and_type=args.valid_data_path_and_name_and_type,\n key_file=valid_key_file,\n batch_size=args.valid_batch_size,\n dtype=args.train_dtype,\n num_workers=args.num_workers,\n allow_variable_data_keys=args.allow_variable_data_keys,\n ngpu=args.ngpu,\n preprocess_fn=cls.build_preprocess_fn(args, train=False),\n collate_fn=cls.build_collate_fn(args, train=False),\n ),\n output_dir=output_dir,\n ngpu=args.ngpu,\n log_interval=args.log_interval,\n write_collected_feats=args.write_collected_feats,\n )\n else:\n\n # 7. Build iterator factories\n if args.multiple_iterator:\n train_iter_factory = cls.build_multiple_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"train\",\n )\n else:\n train_iter_factory = cls.build_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"train\",\n )\n valid_iter_factory = cls.build_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"valid\",\n )\n if args.num_att_plot != 0:\n plot_attention_iter_factory = cls.build_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"plot_att\",\n )\n else:\n plot_attention_iter_factory = None\n\n # 8. 
Start training\n if args.use_wandb:\n if (\n not distributed_option.distributed\n or distributed_option.dist_rank == 0\n ):\n if args.wandb_project is None:\n project = (\n \"ESPnet_\"\n + cls.__name__\n + str(Path(\".\").resolve()).replace(\"/\", \"_\")\n )\n else:\n project = args.wandb_project\n if args.wandb_id is None:\n wandb_id = str(output_dir).replace(\"/\", \"_\")\n else:\n wandb_id = args.wandb_id\n\n wandb.init(\n project=project,\n dir=output_dir,\n id=wandb_id,\n resume=\"allow\",\n )\n wandb.config.update(args)\n else:\n # wandb also supports grouping for distributed training,\n # but we only logs aggregated data,\n # so it's enough to perform on rank0 node.\n args.use_wandb = False\n\n # Don't give args to trainer.run() directly!!!\n # Instead of it, define \"Options\" object and build here.\n trainer_options = cls.trainer.build_options(args)\n cls.trainer.run(\n model=model,\n optimizers=optimizers,\n schedulers=schedulers,\n train_iter_factory=train_iter_factory,\n valid_iter_factory=valid_iter_factory,\n plot_attention_iter_factory=plot_attention_iter_factory,\n trainer_options=trainer_options,\n distributed_option=distributed_option,\n )\n\n @classmethod\n def build_iter_options(\n cls,\n args: argparse.Namespace,\n distributed_option: DistributedOption,\n mode: str,\n ):\n if mode == \"train\":\n preprocess_fn = cls.build_preprocess_fn(args, train=True)\n collate_fn = cls.build_collate_fn(args, train=True)\n data_path_and_name_and_type = args.train_data_path_and_name_and_type\n shape_files = args.train_shape_file\n batch_size = args.batch_size\n batch_bins = args.batch_bins\n batch_type = args.batch_type\n max_cache_size = args.max_cache_size\n max_cache_fd = args.max_cache_fd\n distributed = distributed_option.distributed\n num_batches = None\n num_iters_per_epoch = args.num_iters_per_epoch\n train = True\n\n elif mode == \"valid\":\n preprocess_fn = cls.build_preprocess_fn(args, train=False)\n collate_fn = cls.build_collate_fn(args, train=False)\n data_path_and_name_and_type = args.valid_data_path_and_name_and_type\n shape_files = args.valid_shape_file\n\n if args.valid_batch_type is None:\n batch_type = args.batch_type\n else:\n batch_type = args.valid_batch_type\n if args.valid_batch_size is None:\n batch_size = args.batch_size\n else:\n batch_size = args.valid_batch_size\n if args.valid_batch_bins is None:\n batch_bins = args.batch_bins\n else:\n batch_bins = args.valid_batch_bins\n if args.valid_max_cache_size is None:\n # Cache 5% of maximum size for validation loader\n max_cache_size = 0.05 * args.max_cache_size\n else:\n max_cache_size = args.valid_max_cache_size\n max_cache_fd = args.max_cache_fd\n distributed = distributed_option.distributed\n num_batches = None\n num_iters_per_epoch = None\n train = False\n\n elif mode == \"plot_att\":\n preprocess_fn = cls.build_preprocess_fn(args, train=False)\n collate_fn = cls.build_collate_fn(args, train=False)\n data_path_and_name_and_type = args.valid_data_path_and_name_and_type\n shape_files = args.valid_shape_file\n batch_type = \"unsorted\"\n batch_size = 1\n batch_bins = 0\n num_batches = args.num_att_plot\n max_cache_fd = args.max_cache_fd\n # num_att_plot should be a few sample ~ 3, so cache all data.\n max_cache_size = np.inf if args.max_cache_size != 0.0 else 0.0\n # always False because plot_attention performs on RANK0\n distributed = False\n num_iters_per_epoch = None\n train = False\n else:\n raise NotImplementedError(f\"mode={mode}\")\n\n return IteratorOptions(\n preprocess_fn=preprocess_fn,\n 
collate_fn=collate_fn,\n data_path_and_name_and_type=data_path_and_name_and_type,\n shape_files=shape_files,\n batch_type=batch_type,\n batch_size=batch_size,\n batch_bins=batch_bins,\n num_batches=num_batches,\n max_cache_size=max_cache_size,\n max_cache_fd=max_cache_fd,\n distributed=distributed,\n num_iters_per_epoch=num_iters_per_epoch,\n train=train,\n )\n\n @classmethod\n def build_iter_factory(\n cls,\n args: argparse.Namespace,\n distributed_option: DistributedOption,\n mode: str,\n kwargs: dict = None,\n ) -> AbsIterFactory:\n \"\"\"Build a factory object of mini-batch iterator.\n\n This object is invoked at every epochs to build the iterator for each epoch\n as following:\n\n >>> iter_factory = cls.build_iter_factory(...)\n >>> for epoch in range(1, max_epoch):\n ... for keys, batch in iter_fatory.build_iter(epoch):\n ... model(**batch)\n\n The mini-batches for each epochs are fully controlled by this class.\n Note that the random seed used for shuffling is decided as \"seed + epoch\" and\n the generated mini-batches can be reproduces when resuming.\n\n Note that the definition of \"epoch\" doesn't always indicate\n to run out of the whole training corpus.\n \"--num_iters_per_epoch\" option restricts the number of iterations for each epoch\n and the rest of samples for the originally epoch are left for the next epoch.\n e.g. If The number of mini-batches equals to 4, the following two are same:\n\n - 1 epoch without \"--num_iters_per_epoch\"\n - 4 epoch with \"--num_iters_per_epoch\" == 4\n\n \"\"\"\n assert check_argument_types()\n iter_options = cls.build_iter_options(args, distributed_option, mode)\n\n # Overwrite iter_options if any kwargs is given\n if kwargs is not None:\n for k, v in kwargs.items():\n setattr(iter_options, k, v)\n\n if args.iterator_type == \"sequence\":\n return cls.build_sequence_iter_factory(\n args=args,\n iter_options=iter_options,\n mode=mode,\n )\n elif args.iterator_type == \"chunk\":\n return cls.build_chunk_iter_factory(\n args=args,\n iter_options=iter_options,\n mode=mode,\n )\n elif args.iterator_type == \"task\":\n return cls.build_task_iter_factory(\n args=args,\n iter_options=iter_options,\n mode=mode,\n )\n else:\n raise RuntimeError(f\"Not supported: iterator_type={args.iterator_type}\")\n\n @classmethod\n def build_sequence_iter_factory(\n cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str\n ) -> AbsIterFactory:\n assert check_argument_types()\n\n dataset = ESPnetDataset(\n iter_options.data_path_and_name_and_type,\n float_dtype=args.train_dtype,\n preprocess=iter_options.preprocess_fn,\n max_cache_size=iter_options.max_cache_size,\n max_cache_fd=iter_options.max_cache_fd,\n )\n cls.check_task_requirements(\n dataset, args.allow_variable_data_keys, train=iter_options.train\n )\n\n if Path(\n Path(iter_options.data_path_and_name_and_type[0][0]).parent, \"utt2category\"\n ).exists():\n utt2category_file = str(\n Path(\n Path(iter_options.data_path_and_name_and_type[0][0]).parent,\n \"utt2category\",\n )\n )\n else:\n utt2category_file = None\n batch_sampler = build_batch_sampler(\n type=iter_options.batch_type,\n shape_files=iter_options.shape_files,\n fold_lengths=args.fold_length,\n batch_size=iter_options.batch_size,\n batch_bins=iter_options.batch_bins,\n sort_in_batch=args.sort_in_batch,\n sort_batch=args.sort_batch,\n drop_last=False,\n min_batch_size=torch.distributed.get_world_size()\n if iter_options.distributed\n else 1,\n utt2category_file=utt2category_file,\n )\n\n batches = list(batch_sampler)\n if 
iter_options.num_batches is not None:\n batches = batches[: iter_options.num_batches]\n\n bs_list = [len(batch) for batch in batches]\n\n logging.info(f\"[{mode}] dataset:\\n{dataset}\")\n logging.info(f\"[{mode}] Batch sampler: {batch_sampler}\")\n logging.info(\n f\"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, \"\n f\"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}\"\n )\n\n if iter_options.distributed:\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n for batch in batches:\n if len(batch) < world_size:\n raise RuntimeError(\n f\"The batch-size must be equal or more than world_size: \"\n f\"{len(batch)} < {world_size}\"\n )\n batches = [batch[rank::world_size] for batch in batches]\n\n return SequenceIterFactory(\n dataset=dataset,\n batches=batches,\n seed=args.seed,\n num_iters_per_epoch=iter_options.num_iters_per_epoch,\n shuffle=iter_options.train,\n num_workers=args.num_workers,\n collate_fn=iter_options.collate_fn,\n pin_memory=args.ngpu > 0,\n )\n\n @classmethod\n def build_chunk_iter_factory(\n cls,\n args: argparse.Namespace,\n iter_options: IteratorOptions,\n mode: str,\n ) -> AbsIterFactory:\n assert check_argument_types()\n\n dataset = ESPnetDataset(\n iter_options.data_path_and_name_and_type,\n float_dtype=args.train_dtype,\n preprocess=iter_options.preprocess_fn,\n max_cache_size=iter_options.max_cache_size,\n max_cache_fd=iter_options.max_cache_fd,\n )\n cls.check_task_requirements(\n dataset, args.allow_variable_data_keys, train=iter_options.train\n )\n\n if len(iter_options.shape_files) == 0:\n key_file = iter_options.data_path_and_name_and_type[0][0]\n else:\n key_file = iter_options.shape_files[0]\n\n batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)\n batches = list(batch_sampler)\n if iter_options.num_batches is not None:\n batches = batches[: iter_options.num_batches]\n logging.info(f\"[{mode}] dataset:\\n{dataset}\")\n\n if iter_options.distributed:\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n if len(batches) < world_size:\n raise RuntimeError(\"Number of samples is smaller than world_size\")\n if iter_options.batch_size < world_size:\n raise RuntimeError(\"batch_size must be equal or more than world_size\")\n\n if rank < iter_options.batch_size % world_size:\n batch_size = iter_options.batch_size // world_size + 1\n else:\n batch_size = iter_options.batch_size // world_size\n num_cache_chunks = args.num_cache_chunks // world_size\n # NOTE(kamo): Split whole corpus by sample numbers without considering\n # each of the lengths, therefore the number of iteration counts are not\n # always equal to each other and the iterations are limitted\n # by the fewest iterations.\n # i.e. 
the samples over the counts are discarded.\n batches = batches[rank::world_size]\n else:\n batch_size = iter_options.batch_size\n num_cache_chunks = args.num_cache_chunks\n\n return ChunkIterFactory(\n dataset=dataset,\n batches=batches,\n seed=args.seed,\n batch_size=batch_size,\n # For chunk iterator,\n # --num_iters_per_epoch doesn't indicate the number of iterations,\n # but indicates the number of samples.\n num_samples_per_epoch=iter_options.num_iters_per_epoch,\n shuffle=iter_options.train,\n num_workers=args.num_workers,\n collate_fn=iter_options.collate_fn,\n pin_memory=args.ngpu > 0,\n chunk_length=args.chunk_length,\n chunk_shift_ratio=args.chunk_shift_ratio,\n num_cache_chunks=num_cache_chunks,\n )\n\n # NOTE(kamo): Not abstract class\n @classmethod\n def build_task_iter_factory(\n cls,\n args: argparse.Namespace,\n iter_options: IteratorOptions,\n mode: str,\n ) -> AbsIterFactory:\n \"\"\"Build task specific iterator factory\n\n Example:\n\n >>> class YourTask(AbsTask):\n ... @classmethod\n ... def add_task_arguments(cls, parser: argparse.ArgumentParser):\n ... parser.set_defaults(iterator_type=\"task\")\n ...\n ... @classmethod\n ... def build_task_iter_factory(\n ... cls,\n ... args: argparse.Namespace,\n ... iter_options: IteratorOptions,\n ... mode: str,\n ... ):\n ... return FooIterFactory(...)\n ...\n ... @classmethod\n ... def build_iter_options(\n .... args: argparse.Namespace,\n ... distributed_option: DistributedOption,\n ... mode: str\n ... ):\n ... # if you need to customize options object\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def build_multiple_iter_factory(\n cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str\n ):\n assert check_argument_types()\n iter_options = cls.build_iter_options(args, distributed_option, mode)\n assert len(iter_options.data_path_and_name_and_type) > 0, len(\n iter_options.data_path_and_name_and_type\n )\n\n # 1. Sanity check\n num_splits = None\n for path in [\n path for path, _, _ in iter_options.data_path_and_name_and_type\n ] + list(iter_options.shape_files):\n if not Path(path).is_dir():\n raise RuntimeError(f\"{path} is not a directory\")\n p = Path(path) / \"num_splits\"\n if not p.exists():\n raise FileNotFoundError(f\"{p} is not found\")\n with p.open() as f:\n _num_splits = int(f.read())\n if num_splits is not None and num_splits != _num_splits:\n raise RuntimeError(\n f\"Number of splits are mismathed: \"\n f\"{iter_options.data_path_and_name_and_type[0][0]} and {path}\"\n )\n num_splits = _num_splits\n\n for i in range(num_splits):\n p = Path(path) / f\"split.{i}\"\n if not p.exists():\n raise FileNotFoundError(f\"{p} is not found\")\n\n # 2. 
Create functions to build an iter factory for each splits\n data_path_and_name_and_type_list = [\n [\n (str(Path(p) / f\"split.{i}\"), n, t)\n for p, n, t in iter_options.data_path_and_name_and_type\n ]\n for i in range(num_splits)\n ]\n shape_files_list = [\n [str(Path(s) / f\"split.{i}\") for s in iter_options.shape_files]\n for i in range(num_splits)\n ]\n num_iters_per_epoch_list = [\n (iter_options.num_iters_per_epoch + i) // num_splits\n if iter_options.num_iters_per_epoch is not None\n else None\n for i in range(num_splits)\n ]\n max_cache_size = iter_options.max_cache_size / num_splits\n\n # Note that iter-factories are built for each epoch at runtime lazily.\n build_funcs = [\n functools.partial(\n cls.build_iter_factory,\n args,\n distributed_option,\n mode,\n kwargs=dict(\n data_path_and_name_and_type=_data_path_and_name_and_type,\n shape_files=_shape_files,\n num_iters_per_epoch=_num_iters_per_epoch,\n max_cache_size=max_cache_size,\n ),\n )\n for (\n _data_path_and_name_and_type,\n _shape_files,\n _num_iters_per_epoch,\n ) in zip(\n data_path_and_name_and_type_list,\n shape_files_list,\n num_iters_per_epoch_list,\n )\n ]\n\n # 3. Build MultipleIterFactory\n return MultipleIterFactory(\n build_funcs=build_funcs, shuffle=iter_options.train, seed=args.seed\n )\n\n @classmethod\n def build_streaming_iterator(\n cls,\n data_path_and_name_and_type,\n preprocess_fn,\n collate_fn,\n key_file: str = None,\n batch_size: int = 1,\n dtype: str = np.float32,\n num_workers: int = 1,\n allow_variable_data_keys: bool = False,\n ngpu: int = 0,\n inference: bool = False,\n ) -> DataLoader:\n \"\"\"Build DataLoader using iterable dataset\"\"\"\n assert check_argument_types()\n # For backward compatibility for pytorch DataLoader\n if collate_fn is not None:\n kwargs = dict(collate_fn=collate_fn)\n else:\n kwargs = {}\n\n # IterableDataset is supported from pytorch=1.2\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.2\"):\n dataset = IterableESPnetDataset(\n data_path_and_name_and_type,\n float_dtype=dtype,\n preprocess=preprocess_fn,\n key_file=key_file,\n )\n if dataset.apply_utt2category:\n kwargs.update(batch_size=1)\n else:\n kwargs.update(batch_size=batch_size)\n else:\n dataset = ESPnetDataset(\n data_path_and_name_and_type,\n float_dtype=dtype,\n preprocess=preprocess_fn,\n )\n if key_file is None:\n key_file = data_path_and_name_and_type[0][0]\n batch_sampler = UnsortedBatchSampler(\n batch_size=batch_size,\n key_file=key_file,\n drop_last=False,\n )\n kwargs.update(batch_sampler=batch_sampler)\n\n cls.check_task_requirements(\n dataset, allow_variable_data_keys, train=False, inference=inference\n )\n\n return DataLoader(\n dataset=dataset,\n pin_memory=ngpu > 0,\n num_workers=num_workers,\n **kwargs,\n )\n\n # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~\n @classmethod\n def build_model_from_file(\n cls,\n config_file: Union[Path, str],\n model_file: Union[Path, str] = None,\n device: str = \"cpu\",\n ) -> Tuple[AbsESPnetModel, argparse.Namespace]:\n \"\"\"This method is used for inference or fine-tuning.\n\n Args:\n config_file: The yaml file saved when training.\n model_file: The model file saved when training.\n device:\n\n \"\"\"\n assert check_argument_types()\n config_file = Path(config_file)\n\n with config_file.open(\"r\", encoding=\"utf-8\") as f:\n args = yaml.safe_load(f)\n args = argparse.Namespace(**args)\n model = cls.build_model(args)\n if not isinstance(model, AbsESPnetModel):\n raise RuntimeError(\n f\"model must inherit 
{AbsESPnetModel.__name__}, but got {type(model)}\"\n )\n model.to(device)\n if model_file is not None:\n if device == \"cuda\":\n # NOTE(kamo): \"cuda\" for torch.load always indicates cuda:0\n # in PyTorch<=1.4\n device = f\"cuda:{torch.cuda.current_device()}\"\n model.load_state_dict(torch.load(model_file, map_location=device))\n\n return model, args\n" ]
[ [ "torch.autograd.set_detect_anomaly", "torch.load", "numpy.min", "torch.cuda.current_device", "torch.utils.data.DataLoader", "torch.multiprocessing.spawn.SpawnContext", "torch.multiprocessing.get_context", "numpy.max", "numpy.mean", "torch.distributed.get_rank", "torch.distributed.get_world_size" ] ]
geexie/mlir-extensions
[ "16f2a157d479e959dcd76428b1407a678abdff3f" ]
[ "numba_dpcomp/mlir/tests/test_numpy.py" ]
[ "# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numba\n# from numba_dpcomp import njit\nfrom numba_dpcomp import vectorize\nfrom numba_dpcomp.mlir.passes import print_pass_ir, get_print_buffer\nfrom numpy.testing import assert_equal, assert_allclose # for nans comparison\nimport numpy as np\nimport itertools\nimport math\nfrom functools import partial\nimport pytest\nfrom sklearn.datasets import make_regression\n\nfrom .utils import parametrize_function_variants\nfrom .utils import njit_cached as njit\n\nnp.seterr(all='ignore')\n\ndef _vectorize_reference(func, arg1):\n ret = np.empty(arg1.shape, arg1.dtype)\n for ind, val in np.ndenumerate(arg1):\n ret[ind] = func(val)\n return ret\n\n_arr_1d_int = np.array([1,2,3,4,5,6,7,8])\n_arr_1d_float = np.array([1.0,2.1,3.2,4.3,5.4,6.5,7.6,8.7])\n_arr_2d_int = np.array([[1,2,3,4],[5,6,7,8]])\n_arr_2d_float = np.array([[1.0,2.1,3.2,4.3],[5.4,6.5,7.6,8.7]])\n_test_arrays = [\n _arr_1d_int,\n _arr_1d_float,\n _arr_2d_int,\n _arr_2d_float,\n _arr_2d_int.T,\n _arr_2d_float.T,\n]\n_test_arrays_ids = [\n '1d_int',\n '1d_float',\n '2d_int',\n '2d_float',\n '2d_int.T',\n '2d_float.T',\n]\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: a.sum()',\n 'lambda a: np.sum(a)',\n 'lambda a: np.mean(a)',\n 'lambda a: np.sqrt(a)',\n 'lambda a: np.square(a)',\n 'lambda a: np.log(a)',\n 'lambda a: np.sin(a)',\n 'lambda a: np.cos(a)',\n 'lambda a: a.size',\n 'lambda a: a.T',\n 'lambda a: a.T.T',\n])\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\ndef test_unary(py_func, arr, request):\n jit_func = njit(py_func)\n assert_allclose(py_func(arr), jit_func(arr), rtol=1e-15, atol=1e-15)\n\n_test_binary_test_arrays = [1, 2.5, np.array([1,2,3]), np.array([4.4,5.5,6.6])]\n_test_binary_test_arrays_ids = ['1', '2.5', 'np.array([1,2,3])', 'np.array([4.4,5.5,6.6])']\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b: np.add(a, b)',\n 'lambda a, b: a + b',\n 'lambda a, b: np.subtract(a, b)',\n 'lambda a, b: a - b',\n 'lambda a, b: np.multiply(a, b)',\n 'lambda a, b: a * b',\n 'lambda a, b: np.power(a, b)',\n 'lambda a, b: a ** b',\n 'lambda a, b: np.true_divide(a, b)',\n 'lambda a, b: a / b',\n])\[email protected](\"a\",\n _test_binary_test_arrays,\n ids=_test_binary_test_arrays_ids)\[email protected](\"b\",\n _test_binary_test_arrays,\n ids=_test_binary_test_arrays_ids)\ndef test_binary(py_func, a, b):\n jit_func = njit(py_func)\n assert_equal(py_func(a,b), jit_func(a,b))\n\n_test_broadcast_test_arrays = [\n 1,\n np.array([1]),\n np.array([[1]]),\n np.array([[1,2],[3,4]]),\n np.array([5,6]),\n np.array([[5],[6]]),\n np.array([[5,6]]),\n]\n_test_broadcast_test_arrays_ids = [\n '1',\n 'np.array([1])',\n 'np.array([[1]])',\n 'np.array([[1,2],[3,4]])',\n 'np.array([5,6])',\n 'np.array([[5],[6]])',\n 'np.array([[5,6]])',\n]\[email protected](\"a\",\n _test_broadcast_test_arrays,\n ids=_test_broadcast_test_arrays_ids)\[email protected](\"b\",\n _test_broadcast_test_arrays,\n 
ids=_test_broadcast_test_arrays_ids)\ndef test_broadcast(a, b):\n def py_func(a, b):\n return np.add(a, b)\n\n jit_func = njit(py_func)\n assert_equal(py_func(a,b), jit_func(a,b))\n\ndef test_staticgetitem():\n def py_func(a):\n return a[1]\n\n jit_func = njit(py_func)\n arr = np.asarray([5,6,7])\n assert_equal(py_func(arr), jit_func(arr))\n\[email protected](\"i\",\n list(range(3)))\ndef test_getitem1(i):\n def py_func(a, b):\n return a[b]\n\n jit_func = njit(py_func)\n arr = np.asarray([5,6,7])\n assert_equal(py_func(arr, i), jit_func(arr, i))\n\ndef test_getitem2():\n def py_func(a, b):\n return a[b]\n\n jit_func = njit(py_func)\n arr = np.asarray([[[1,2,3],[5,6,7]]])\n assert_equal(py_func(arr, 0), jit_func(arr, 0))\n\ndef test_getitem3():\n def py_func(a, b, c):\n return a[b, c]\n\n jit_func = njit(py_func)\n arr = np.asarray([[[1,2,3],[5,6,7]]])\n assert_equal(py_func(arr, 0, 0), jit_func(arr, 0, 0))\n\ndef test_array_len():\n def py_func(a):\n return len(a)\n\n jit_func = njit(py_func)\n arr = np.asarray([5,6,7])\n assert_equal(py_func(arr), jit_func(arr))\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: np.sum(a, axis=0)',\n 'lambda a: np.sum(a, axis=1)',\n ])\[email protected](\"arr\", [\n np.array([[1,2,3],[4,5,6]], dtype=np.int32),\n np.array([[1,2,3],[4,5,6]], dtype=np.float32),\n ])\ndef test_sum_axis(py_func, arr):\n jit_func = njit(py_func)\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_sum_add():\n def py_func(a, b):\n return np.add(a, b).sum()\n\n jit_func = njit(py_func)\n arr1 = np.asarray([1,2,3])\n arr2 = np.asarray([4,5,6])\n assert_equal(py_func(arr1, arr2), jit_func(arr1, arr2))\n\ndef test_sum_add2():\n def py_func(a, b, c):\n t = np.add(a, b)\n return np.add(t, c).sum()\n\n jit_func = njit(py_func)\n arr1 = np.asarray([1,2,3])\n arr2 = np.asarray([4,5,6])\n arr3 = np.asarray([7,8,9])\n assert_equal(py_func(arr1, arr2, arr3), jit_func(arr1, arr2, arr3))\n\[email protected](\"a,b\", [\n (np.array([1,2,3], np.float32), np.array([4,5,6], np.float32)),\n (np.array([[1,2,3],[4,5,6]], np.float32), np.array([[1,2],[3,4],[5,6]], np.float32)),\n ])\[email protected](\"parallel\", [False, True])\ndef test_dot(a, b, parallel):\n def py_func(a, b):\n return np.dot(a, b)\n\n jit_func = njit(py_func, parallel=parallel)\n assert_equal(py_func(a, b), jit_func(a, b))\n\ndef test_prange_lowering():\n def py_func(arr):\n res = 0\n for i in numba.prange(len(arr)):\n res += arr[i]\n\n return res\n\n with print_pass_ir([],['ParallelToTbbPass']):\n jit_func = njit(py_func, parallel=True)\n arr = np.arange(10000, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('plier.parallel') == 1, ir\n\ndef test_loop_fusion1():\n def py_func(arr):\n l = len(arr)\n res1 = 0\n for i in numba.prange(l):\n res1 += arr[i]\n\n res2 = 1.0\n for i in numba. prange(l):\n res2 *= arr[i]\n\n return res1, res2\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.arange(1, 15, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 1, ir\n assert ir.count('memref.load') == 1, ir\n\ndef test_loop_fusion2():\n def py_func(arr):\n l = len(arr)\n res1 = 0\n for i in numba.prange(l):\n res1 += arr[i]\n\n res1 += 10\n\n res2 = 0.0\n for i in numba. 
prange(l):\n res2 *= arr[i]\n\n return res1, res2\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.arange(1, 15, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 1, ir\n assert ir.count('memref.load') == 1, ir\n\ndef test_loop_fusion3():\n def py_func(arr):\n l = len(arr)\n res1 = 0\n for i in numba.prange(l):\n res1 += arr[i]\n\n res2 = 1.0\n for i in numba. prange(l):\n res2 *= (arr[i] * res1)\n\n return res1, res2\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.arange(1, 15, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 2, ir\n assert ir.count('memref.load') == 2, ir\n\[email protected](\"dtype\", [np.int32, np.int64, np.float32])\ndef test_np_reduce(dtype):\n def py_func(arr):\n return arr.sum()\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.array([[1,2,3],[4,5,6]], dtype=dtype)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 1, ir\n assert ir.count('memref.load') == 1, ir\n\ndef test_indirect_call_array():\n def inner_func(a):\n return a + 3\n\n def func(func, *args):\n return func(*args)\n\n jit_inner_func = njit(inner_func)\n jit_func = njit(func)\n\n arr = np.array([[1,2,3],[4,5,6]])\n # arr = 5\n assert_equal(func(inner_func, arr), jit_func(jit_inner_func, arr))\n\ndef test_loop_if():\n def py_func(arr):\n for i in range(len(arr)):\n if arr[i] == 5:\n arr[i] = 6\n return arr\n\n jit_func = njit(py_func)\n arr1 = np.arange(100)\n arr2 = np.arange(100)\n assert_equal(py_func(arr1), jit_func(arr2))\n\ndef test_static_setitem():\n def py_func(a):\n a[1] = 42\n return a[1]\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3])\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_setitem1():\n def py_func(a, b):\n a[b] = 42\n return a[b]\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3])\n assert_equal(py_func(arr, 1), jit_func(arr, 1))\n\ndef test_setitem2():\n def py_func(a, b, c):\n a[b, c] = 42\n return a[b, c]\n\n jit_func = njit(py_func)\n arr = np.asarray([[1,2,3],[4,5,6]])\n assert_equal(py_func(arr, 1, 2), jit_func(arr, 1, 2))\n\ndef test_setitem_loop():\n def py_func(a):\n for i in range(len(a)):\n a[i] = a[i] + i\n return a.sum()\n\n jit_func = njit(py_func)\n arr = np.asarray([3,2,1])\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n\ndef test_array_bounds1():\n def py_func(a):\n res = 0\n for i in range(len(a)):\n if i >= len(a) or i < 0:\n res = res + 1\n else:\n res = res + a[i]\n return res\n\n arr = np.asarray([3,2,1])\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_bounds2():\n def py_func(a):\n res = 0\n for i in range(len(a)):\n if i < len(a) and i >= 0:\n res = res + a[i]\n else:\n res = res + 1\n return res\n\n arr = np.asarray([3,2,1])\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_bounds3():\n def py_func(a):\n res = 0\n for i in range(len(a)):\n if 0 <= i < len(a):\n res = res + a[i]\n else:\n res = res + 1\n return res\n\n arr = np.asarray([3,2,1])\n\n with 
print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_bounds4():\n def py_func(a):\n res = 0\n for i in range(len(a) - 1):\n if 0 <= i < (len(a) - 1):\n res = res + a[i]\n else:\n res = res + 1\n return res\n\n arr = np.asarray([3,2,1])\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_shape():\n def py_func(a):\n shape = a.shape\n return shape[0] + shape[1] * 10\n\n jit_func = njit(py_func)\n arr = np.array([[1,2,3],[4,5,6]])\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_array_return():\n def py_func(a):\n return a\n\n jit_func = njit(py_func)\n arr = np.array([1,2,3])\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_array_prange_const():\n def py_func(a, b):\n a[0] = 42\n for i in numba.prange(b):\n a[0] = 1\n return a[0]\n\n jit_func = njit(py_func, parallel=True)\n arr = np.array([0.0])\n assert_equal(py_func(arr, 5), jit_func(arr, 5))\n\ndef test_empty1():\n def py_func(d):\n a = np.empty(d)\n for i in range(d):\n a[i] = i\n return a\n\n jit_func = njit(py_func)\n assert_equal(py_func(5), jit_func(5))\n\ndef test_empty2():\n def py_func(d1, d2):\n a = np.empty((d1, d2))\n for i in range(d1):\n for j in range(d2):\n a[i, j] = i + j * 10\n return a\n\n jit_func = njit(py_func)\n assert_equal(py_func(5, 7), jit_func(5, 7))\n\[email protected](\"dtype\", ['int32','int64','float32','float64'])\ndef test_empty3(dtype):\n def py_func(a):\n return np.empty(a.shape, a.dtype)\n\n jit_func = njit(py_func)\n arr = np.array([1,2,3], dtype=dtype)\n assert_equal(py_func(arr).shape, jit_func(arr).shape)\n assert_equal(py_func(arr).dtype, jit_func(arr).dtype)\n\ndef test_zeros1():\n def py_func(d):\n return np.zeros(d)\n\n jit_func = njit(py_func)\n assert_equal(py_func(5), jit_func(5))\n\[email protected](\"dtype\", ['int32','int64','float32','float64'])\ndef test_zeros2(dtype):\n def py_func(a):\n return np.zeros(a.shape, a.dtype)\n\n jit_func = njit(py_func)\n arr = np.array([1, 2, 3], dtype=dtype)\n assert_equal(py_func(arr).shape, jit_func(arr).shape)\n assert_equal(py_func(arr).dtype, jit_func(arr).dtype)\n\[email protected]\ndef test_zeros3():\n def py_func(d):\n return np.zeros(d, dtype=np.dtype('int64'))\n\n jit_func = njit(py_func)\n assert_equal(py_func(5), jit_func(5))\n\ndef test_zeros4():\n def py_func(d):\n return np.zeros(d)\n\n jit_func = njit(py_func)\n assert_equal(py_func((2, 1)), jit_func((2, 1)))\n\ndef test_parallel():\n def py_func(a, b):\n return np.add(a, b)\n\n jit_func = njit(py_func, parallel=True)\n arr = np.asarray([[[1,2,3],[4,5,6]],\n [[1,2,3],[4,5,6]]])\n assert_equal(py_func(arr,arr), jit_func(arr,arr))\n\ndef test_parallel_reduce():\n def py_func(a):\n shape = a.shape\n res = 0\n for i in range(shape[0]):\n for j in numba.prange(shape[1]):\n for k in numba.prange(shape[2]):\n res = res + a[i,j,k]\n return res\n\n jit_func = njit(py_func, parallel=True)\n arr = np.asarray([[[1,2,3],[4,5,6]]]).repeat(10000,0)\n assert_equal(py_func(arr), jit_func(arr))\n\n@parametrize_function_variants(\"func\", [\n 'lambda a : a + 1',\n 'lambda a : math.erf(a)',\n # 'lambda a : 5 if a == 1 else a', TODO: investigate\n])\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\ndef test_vectorize(func, arr):\n arr = np.array(arr)\n vec_func = 
vectorize(func)\n assert_equal(_vectorize_reference(func, arr), vec_func(arr))\n\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\ndef test_vectorize_indirect(arr):\n def func(a):\n return a + 1\n\n vec_func = vectorize(func)\n\n def py_func(a):\n return vec_func(a)\n\n jit_func = njit(py_func, parallel=True)\n\n arr = np.array(arr)\n assert_equal(_vectorize_reference(func, arr), jit_func(arr))\n\[email protected](\"arr\", [\n np.array([[1,2],[3,4]]),\n # np.array([[1,2],[3,4]]).T,\n])\ndef test_fortran_layout(arr):\n def py_func(a):\n return a.T\n\n jit_func = njit(py_func)\n\n assert_equal(py_func(arr), jit_func(arr))\n\n@parametrize_function_variants(\"a\", [\n # 'np.array(1)', TODO zero rank arrays\n # 'np.array(2.5)',\n 'np.array([])',\n 'np.array([1,2,3])',\n 'np.array([[1,2,3]])',\n 'np.array([[1,2],[3,4],[5,6]])',\n ])\ndef test_atleast2d(a):\n def py_func(a):\n return np.atleast_2d(a)\n\n jit_func = njit(py_func)\n assert_equal(py_func(a), jit_func(a))\n\n_test_reshape_test_array = np.array([1,2,3,4,5,6,7,8,9,10,11,12])\n_test_reshape_test_arrays = [\n _test_reshape_test_array,\n _test_reshape_test_array.reshape((2,6)),\n _test_reshape_test_array.reshape((2,3,2)),\n]\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: a.reshape(a.size)',\n 'lambda a: a.reshape((a.size,))',\n 'lambda a: a.reshape((a.size,1))',\n 'lambda a: a.reshape((1, a.size))',\n 'lambda a: a.reshape((1, a.size, 1))',\n ])\[email protected](\"array\", _test_reshape_test_arrays)\ndef test_reshape(py_func, array):\n jit_func = njit(py_func)\n assert_equal(py_func(array), jit_func(array))\n\[email protected](reason=\"numba: reshape() supports contiguous array only\")\ndef test_reshape_non_contiguous():\n def py_func(a):\n return a.reshape(4)\n jit_func = njit(py_func)\n array = np.arange(16).reshape((4,4))[1:3,1:3]\n assert_equal(py_func(array), jit_func(array))\n\n@parametrize_function_variants(\"py_func\", [\n # 'lambda a: a.flat', TODO: flat support\n 'lambda a: a.flatten()',\n ])\[email protected](\"array\", _test_reshape_test_arrays)\ndef test_flatten(py_func, array):\n jit_func = njit(py_func)\n assert_equal(py_func(array), jit_func(array))\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b: ()',\n 'lambda a, b: (a,b)',\n 'lambda a, b: ((a,b),(a,a),(b,b),())',\n ])\[email protected](\"a,b\",\n itertools.product(*(([1,2.5,np.array([1,2,3]), np.array([4.5,6.7,8.9])],)*2))\n )\ndef test_tuple_ret(py_func, a, b):\n jit_func = njit(py_func)\n assert_equal(py_func(a, b), jit_func(a, b))\n\[email protected](\"arrays\",\n [([1,2,3],[4,5,6]),\n ([[1,2],[3,4]],[[5,6],[7,8]]),\n ([[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]),\n ([1,2,3],[4,5,6],[7,8,9]),\n ([1,2],[3,4],[5,6],[7,8]),\n ])\[email protected](\"axis\",\n [0,1,2]) # TODO: None\ndef test_concat(arrays, axis):\n arr = tuple(np.array(a) for a in arrays)\n num_dims = len(arr[0].shape)\n if axis >= num_dims:\n pytest.skip() # TODO: unselect\n num_arrays = len(arrays)\n if num_arrays == 2:\n def py_func(arr1, arr2):\n return np.concatenate((arr1, arr2), axis=axis)\n elif num_arrays == 3:\n def py_func(arr1, arr2, arr3):\n return np.concatenate((arr1, arr2, arr3), axis=axis)\n elif num_arrays == 4:\n def py_func(arr1, arr2, arr3, arr4):\n return np.concatenate((arr1, arr2, arr3, arr4), axis=axis)\n else:\n assert False\n jit_func = njit(py_func)\n assert_equal(py_func(*arr), jit_func(*arr))\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b, c, d: a[b:c]',\n 'lambda a, b, c, d: a[3:c]',\n 'lambda a, b, c, d: 
a[b:4]',\n 'lambda a, b, c, d: a[3:4]',\n 'lambda a, b, c, d: a[b:c:d]',\n 'lambda a, b, c, d: a[b:c:1]',\n 'lambda a, b, c, d: a[b:c:2]',\n 'lambda a, b, c, d: a[3:4:2]',\n ])\ndef test_slice1(py_func):\n arr = np.array([1,2,3,4,5,6,7,8])\n jit_func = njit(py_func)\n assert_equal(py_func(arr, 3, 4, 2), jit_func(arr, 3, 4, 2))\n\ndef test_slice2():\n def py_func(a, i, j, k):\n a1 = a[1]\n a2 = a1[2]\n return a2[3]\n\n arr = np.arange(3*4*5).reshape((3,4,5))\n jit_func = njit(py_func)\n assert_equal(py_func(arr, 1,2,3), jit_func(arr, 1,2,3))\n\ndef test_multidim_slice():\n def py_func(a, b):\n return a[1, b,:]\n jit_func = njit(py_func)\n\n a = np.array([[[1],[2],[3]],[[4],[5],[6]]])\n assert_equal(py_func(a, 0), jit_func(a, 0))\n\ndef test_size_ret():\n def py_func(a, b):\n return a.size / b\n jit_func = njit(py_func)\n\n a = np.array([[[1],[2],[3]],[[4],[5],[6]]])\n assert_equal(py_func(a, 3), jit_func(a, 3))\n\[email protected](\"a\", [\n np.array([[1,2],[4,5]])\n ])\[email protected](\"b\", [True, False])\ndef test_tensor_if(a, b):\n def py_func(m, rowvar):\n m_arr = np.atleast_2d(m)\n if not rowvar:\n m_arr = m_arr.T\n return m_arr\n jit_func = njit(py_func)\n\n assert_equal(py_func(a, b), jit_func(a, b))\n\ndef _cov(m, y=None, rowvar=True, bias=False, ddof=None):\n return np.cov(m, y, rowvar, bias, ddof)\n\n_rnd = np.random.RandomState(42)\n\n@parametrize_function_variants(\"m\", [\n 'np.array([[0, 2], [1, 1], [2, 0]]).T',\n '_rnd.randn(100).reshape(5, 20)',\n 'np.asfortranarray(np.array([[0, 2], [1, 1], [2, 0]]).T)',\n '_rnd.randn(100).reshape(5, 20)[:, ::2]',\n 'np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])',\n # 'np.full((4, 5), fill_value=True)', TODO\n 'np.array([np.nan, 0.5969, -np.inf, 0.9918, 0.7964])',\n 'np.linspace(-3, 3, 33).reshape(33, 1)',\n\n # non-array inputs\n '((0.1, 0.2), (0.11, 0.19), (0.09, 0.21))', # UniTuple\n '((0.1, 0.2), (0.11, 0.19), (0.09j, 0.21j))', # Tuple\n '(-2.1, -1, 4.3)',\n '(1, 2, 3)',\n '[4, 5, 6]',\n '((0.1, 0.2, 0.3), (0.1, 0.2, 0.3))',\n '[(1, 2, 3), (1, 3, 2)]',\n '3.142',\n # '((1.1, 2.2, 1.5),)',\n\n # empty data structures\n 'np.array([])',\n 'np.array([]).reshape(0, 2)',\n 'np.array([]).reshape(2, 0)',\n '()',\n ])\ndef test_cov_basic(m):\n if isinstance(m, (list, float)) or len(m) == 0 or np.iscomplexobj(m):\n pytest.xfail()\n py_func = _cov\n jit_func = njit(py_func)\n assert_allclose(py_func(m), jit_func(m), rtol=1e-15, atol=1e-15)\n\n_cov_inputs_m = _rnd.randn(105).reshape(15, 7)\[email protected](\"m\",\n [_cov_inputs_m])\[email protected](\"y\",\n [None, _cov_inputs_m[::-1]])\[email protected](\"rowvar\",\n [False, True])\[email protected](\"bias\",\n [False, True])\[email protected](\"ddof\",\n [None, -1, 0, 1, 3.0, True])\ndef test_cov_explicit_arguments(m, y, rowvar, bias, ddof):\n py_func = _cov\n jit_func = njit(py_func)\n assert_allclose(py_func(m=m, y=y, rowvar=rowvar, bias=bias, ddof=ddof), jit_func(m=m, y=y, rowvar=rowvar, bias=bias, ddof=ddof), rtol=1e-14, atol=1e-14)\n\n@parametrize_function_variants(\"m, y, rowvar\", [\n '(np.array([-2.1, -1, 4.3]), np.array([3, 1.1, 0.12]), True)',\n '(np.array([1, 2, 3]), np.array([1j, 2j, 3j]), True)',\n '(np.array([1j, 2j, 3j]), np.array([1, 2, 3]), True)',\n '(np.array([1, 2, 3]), np.array([1j, 2j, 3]), True)',\n '(np.array([1j, 2j, 3]), np.array([1, 2, 3]), True)',\n '(np.array([]), np.array([]), True)',\n '(1.1, 2.2, True)',\n '(_rnd.randn(10, 3), np.array([-2.1, -1, 4.3]).reshape(1, 3) / 10, True)',\n '(np.array([-2.1, -1, 4.3]), np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), 
True)',\n # '(np.array([-2.1, -1, 4.3]), np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), False)',\n '(np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), np.array([-2.1, -1, 4.3]), True)',\n # '(np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), np.array([-2.1, -1, 4.3]), False)',\n ])\ndef test_cov_edge_cases(m, y, rowvar):\n if not isinstance(m, np.ndarray) or not isinstance(y, np.ndarray) or np.iscomplexobj(m) or np.iscomplexobj(y):\n pytest.xfail()\n py_func = _cov\n jit_func = njit(py_func)\n assert_allclose(py_func(m=m, y=y, rowvar=rowvar), jit_func(m=m, y=y, rowvar=rowvar), rtol=1e-14, atol=1e-14)\n\[email protected](\"arr\", [\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.int32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.float32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)).T,\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)).T,\n ])\[email protected](\"parallel\", [False, True])\ndef test_mean_loop(arr, parallel):\n def py_func(data):\n tdata = data.T\n m = np.empty(tdata.shape[0])\n for i in numba.prange(tdata.shape[0]):\n m[i] = np.mean(tdata[i])\n return m\n\n jit_func = njit(py_func, parallel=parallel)\n assert_equal(py_func(arr), jit_func(arr))\n\[email protected](\"arr\", [\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.int32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.float32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)).T,\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)).T,\n make_regression(n_samples=2**10, n_features=2**7, random_state=0)[0],\n ])\[email protected](\"parallel\", [False, True])\ndef test_mean_loop_cov(arr, parallel):\n def py_func(data):\n tdata = data.T\n m = np.empty(tdata.shape[0])\n for i in numba.prange(tdata.shape[0]):\n m[i] = np.mean(tdata[i])\n c = data - m\n v = np.cov(c.T)\n return c, v\n\n jit_func = njit(py_func, parallel=parallel)\n c1, v1 = py_func(arr)\n c2, v2 = jit_func(arr)\n assert_allclose(c1, c2, rtol=1e-15, atol=1e-11)\n assert_allclose(v1, v2, rtol=1e-15, atol=1e-11)\n" ]
[ [ "numpy.dot", "numpy.random.RandomState", "numpy.asarray", "numpy.arange", "numpy.dtype", "numpy.concatenate", "numpy.seterr", "numpy.add", "numpy.cov", "numpy.atleast_2d", "numpy.iscomplexobj", "numpy.mean", "numpy.testing.assert_allclose", "sklearn.datasets.make_regression", "numpy.ndenumerate", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
tongni1975/Hands-On-Machine-Learning-on-Google-Cloud-Platform
[ "156950530e64462bcbeebbb5d58eaf5561ed62db" ]
[ "Chapter13/rnn_hwr.py" ]
[ "from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import rnn\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\r\n\r\nlearning_rate = 0.001\r\ntraining_steps = 20000\r\nbatch_size = 128\r\ndisplay_step = 1000\r\n\r\nnum_input = 28 \r\ntimesteps = 28 \r\nnum_hidden = 128 \r\nnum_classes = 10 \r\n\r\nX = tf.placeholder(\"float\", [None, timesteps, num_input])\r\nY = tf.placeholder(\"float\", [None, num_classes])\r\n\r\nweights = {\r\n 'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))\r\n}\r\nbiases = {\r\n 'out': tf.Variable(tf.random_normal([num_classes]))\r\n}\r\n\r\ndef RNN(x, weights, biases):\r\n x = tf.unstack(x, timesteps, 1)\r\n lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)\r\n outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\r\n return tf.matmul(outputs[-1], weights['out']) + biases['out']\r\n\r\nlogits = RNN(X, weights, biases)\r\nprediction = tf.nn.softmax(logits)\r\n\r\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\r\n logits=logits, labels=Y))\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\r\ntrain_op = optimizer.minimize(loss_op)\r\n\r\ncorrect_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\nwith tf.Session() as sess:\r\n\r\n sess.run(init)\r\n\r\n for step in range(1, training_steps+1):\r\n batch_x, batch_y = mnist.train.next_batch(batch_size)\r\n batch_x = batch_x.reshape((batch_size, timesteps, num_input))\r\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\r\n if step % display_step == 0 or step == 1:\r\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\r\n Y: batch_y})\r\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\r\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\r\n \"{:.3f}\".format(acc))\r\n\r\n print(\"End of the optimization process \")\r\n\r\n test_len = 128\r\n test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))\r\n test_label = mnist.test.labels[:test_len]\r\n print(\"Testing Accuracy:\", \\\r\n sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))\r\n\r\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.unstack", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.contrib.rnn.static_rnn", "tensorflow.Session", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.random_normal" ] ]
nikitacs16/d_bi_att_flow
[ "8d984cc8c0dc1112dbad61198caa616f83b54234" ]
[ "basic/graph_handler.py" ]
[ "import gzip\nimport json\nfrom json import encoder\nimport os\n\nimport tensorflow as tf\n\nfrom basic.evaluator import Evaluation, F1Evaluation\nfrom my.utils import short_floats\nfrom metrics.evaluate_off import evaluate\nimport pickle\n\n\nclass GraphHandler(object):\n def __init__(self, config, model):\n self.config = config\n self.model = model\n self.saver = tf.train.Saver(max_to_keep=config.max_to_keep)\n self.writer = None\n self.save_path = os.path.join(config.save_dir, config.model_name)\n self.best_squad_f1 = 0\n\n def initialize(self, sess):\n sess.run(tf.initialize_all_variables())\n if self.config.load:\n self._load(sess)\n\n if self.config.mode == 'train':\n self.writer = tf.train.SummaryWriter(self.config.log_dir, graph=tf.get_default_graph())\n if self.config.mode == 'test' and self.config.save_on_best_f1:\n self._load(sess,latest_filename=\"checkpoint_best\")\n\n def save(self, sess, global_step=None):\n saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)\n saver.save(sess, self.save_path, global_step=global_step)\n\n def _load(self, sess,latest_filename=None):\n config = self.config\n vars_ = {var.name.split(\":\")[0]: var for var in tf.all_variables()}\n if config.load_ema:\n ema = self.model.var_ema\n for var in tf.trainable_variables():\n del vars_[var.name.split(\":\")[0]]\n vars_[ema.average_name(var)] = var\n saver = tf.train.Saver(vars_, max_to_keep=config.max_to_keep)\n\n if config.load_path:\n save_path = config.load_path\n elif config.load_step > 0:\n save_path = os.path.join(config.save_dir, \"{}-{}\".format(config.model_name, config.load_step))\n else:\n save_dir = config.save_dir\n if latest_filename is not None:\n checkpoint = tf.train.get_checkpoint_state(save_dir,latest_filename=latest_filename)\n else:\n checkpoint = tf.train.get_checkpoint_state(save_dir)\n assert checkpoint is not None, \"cannot load checkpoint at {}\".format(save_dir)\n save_path = checkpoint.model_checkpoint_path\n print(\"Loading saved model from {}\".format(save_path))\n saver.restore(sess, save_path)\n\n def add_summary(self, summary, global_step):\n self.writer.add_summary(summary, global_step)\n\n def add_summaries(self, summaries, global_step):\n for summary in summaries:\n self.add_summary(summary, global_step)\n\n def dump_eval(self, e, precision=2, path=None):\n assert isinstance(e, Evaluation)\n if self.config.dump_pickle:\n path = path or os.path.join(self.config.eval_dir, \"{}-{}.pklz\".format(e.data_type, str(e.global_step).zfill(6)))\n with gzip.open(path, 'wb', compresslevel=3) as fh:\n pickle.dump(e.dict, fh)\n else:\n path = path or os.path.join(self.config.eval_dir, \"{}-{}.json\".format(e.data_type, str(e.global_step).zfill(6)))\n with open(path, 'w') as fh:\n json.dump(short_floats(e.dict, precision), fh)\n\n def dump_answer(self, e, global_step=0, sess=None,path=None):\n assert isinstance(e, Evaluation)\n path = path or os.path.join(self.config.answer_dir, \"{}-{}.json\".format(e.data_type, str(e.global_step).zfill(6)))\n with open(path, 'w') as fh:\n json.dump(e.id2answer_dict, fh)\n if self.config.save_on_best_f1:\n e,f = evaluate(os.path.join(self.config.source_dir,self.config.dev_file_name),path)\n if f > self.best_squad_f1:\n self.best_squad_f1 = f\n saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)\n saver.save(sess, self.save_path, global_step=global_step, latest_filename='checkpoint_best')\n\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.all_variables", "tensorflow.trainable_variables", "tensorflow.initialize_all_variables", "tensorflow.train.Saver", "tensorflow.get_default_graph" ] ]
fr42k/gap-wgan-gp
[ "4e373c43d606a1b83f76893d93f9cf8be8cd460d" ]
[ "py3/wgan_gp_pytorch.py" ]
[ "import os, sys\nsys.path.append(os.getcwd())\n\nimport time\nimport tflib as lib\nimport tflib.save_images\nimport tflib.cifar10\nimport tflib.plot\nimport tflib.inception_score\n\nimport numpy as np\n\n\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torch import autograd\nfrom torch import optim\n\n# Download CIFAR-10 (Python version) at\n# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the\n# extracted files here!\nDATA_DIR = 'cifar-10-batches-py/'\nif len(DATA_DIR) == 0:\n raise Exception('Please specify path to data directory in gan_cifar.py!')\n\nEXP = 'origin'\nMODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp\nDIM = 128 # This overfits substantially; you're probably better off with 64\nLAMBDA = 10 # Gradient penalty lambda hyperparameter\nCRITIC_ITERS = 5 # How many critic iterations per generator iteration\nBATCH_SIZE = 64 # Batch size\nITERS = 200000 # How many generator iterations to train for\nOUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32)\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n preprocess = nn.Sequential(\n nn.Linear(128, 4 * 4 * 4 * DIM),\n nn.BatchNorm2d(4 * 4 * 4 * DIM),\n nn.ReLU(True),\n )\n\n block1 = nn.Sequential(\n nn.ConvTranspose2d(4 * DIM, 2 * DIM, 2, stride=2),\n nn.BatchNorm2d(2 * DIM),\n nn.ReLU(True),\n )\n block2 = nn.Sequential(\n nn.ConvTranspose2d(2 * DIM, DIM, 2, stride=2),\n nn.BatchNorm2d(DIM),\n nn.ReLU(True),\n )\n deconv_out = nn.ConvTranspose2d(DIM, 3, 2, stride=2)\n\n self.preprocess = preprocess\n self.block1 = block1\n self.block2 = block2\n self.deconv_out = deconv_out\n self.tanh = nn.Tanh()\n\n def forward(self, input):\n output = self.preprocess(input)\n output = output.view(-1, 4 * DIM, 4, 4)\n output = self.block1(output)\n output = self.block2(output)\n output = self.deconv_out(output)\n output = self.tanh(output)\n return output.view(-1, 3, 32, 32)\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n main = nn.Sequential(\n nn.Conv2d(3, DIM, 3, 2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(DIM, 2 * DIM, 3, 2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(2 * DIM, 4 * DIM, 3, 2, padding=1),\n nn.LeakyReLU(),\n )\n\n self.main = main\n self.linear = nn.Linear(4*4*4*DIM, 1)\n\n def forward(self, input):\n output = self.main(input)\n output = output.view(-1, 4*4*4*DIM)\n output = self.linear(output)\n return output\n\nnetG = Generator()\nnetD = Discriminator()\n\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n gpu = 0\nif use_cuda:\n netD = netD.cuda(gpu)\n netG = netG.cuda(gpu)\n\none = torch.FloatTensor([1])\nmone = one * -1\nif use_cuda:\n one = one.cuda(gpu)\n mone = mone.cuda(gpu)\n\noptimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))\noptimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))\n\ndef calc_gradient_penalty(netD, real_data, fake_data):\n # print \"real_data: \", real_data.size(), fake_data.size()\n alpha = torch.rand(BATCH_SIZE, 1)\n alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, 32, 32)\n alpha = alpha.cuda(gpu) if use_cuda else alpha\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n if use_cuda:\n interpolates = interpolates.cuda(gpu)\n interpolates = autograd.Variable(interpolates, requires_grad=True)\n\n disc_interpolates = netD(interpolates)\n\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n 
grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(\n disc_interpolates.size()),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n gradients = gradients.view(gradients.size(0), -1)\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA\n return gradient_penalty\n\n# For generating samples\ndef generate_image(frame, netG):\n fixed_noise_128 = torch.randn(128, 128)\n if use_cuda:\n fixed_noise_128 = fixed_noise_128.cuda(gpu)\n noisev = autograd.Variable(fixed_noise_128, volatile=True)\n samples = netG(noisev)\n samples = samples.view(-1, 3, 32, 32)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.cpu().data.numpy()\n\n lib.save_images.save_images(samples, './tmp/cifar10/{}/samples_{}.jpg'.format(EXP, frame))\n\n# For calculating inception score\ndef get_inception_score(G, ):\n all_samples = []\n for i in range(10):\n samples_100 = torch.randn(100, 128)\n if use_cuda:\n samples_100 = samples_100.cuda(gpu)\n samples_100 = autograd.Variable(samples_100, volatile=True)\n all_samples.append(G(samples_100).cpu().data.numpy())\n\n all_samples = np.concatenate(all_samples, axis=0)\n all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')\n all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1)\n return lib.inception_score.get_inception_score(list(all_samples))\n\n# Dataset iterator\ntrain_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR)\ndef inf_train_gen():\n while True:\n for images, target in train_gen():\n # yield images.astype('float32').reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)\n yield images\ngen = inf_train_gen()\npreprocess = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\nfor iteration in range(ITERS):\n start_time = time.time()\n ############################\n # (1) Update D network\n ###########################\n for p in netD.parameters(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n for i in range(CRITIC_ITERS):\n _data = gen.__next__()\n netD.zero_grad()\n\n # train with real\n _data = _data.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)\n real_data = torch.stack([preprocess(item) for item in _data])\n\n if use_cuda:\n real_data = real_data.cuda(gpu)\n real_data_v = autograd.Variable(real_data)\n\n # import torchvision\n # filename = os.path.join(\"test_train_data\", str(iteration) + str(i) + \".jpg\")\n # torchvision.utils.save_image(real_data, filename)\n\n D_real = netD(real_data_v)\n D_real = D_real.mean()\n D_real.backward(mone)\n\n # train with fake\n noise = torch.randn(BATCH_SIZE, 128)\n if use_cuda:\n noise = noise.cuda(gpu)\n noisev = autograd.Variable(noise, volatile=True) # totally freeze netG\n fake = autograd.Variable(netG(noisev).data)\n inputv = fake\n D_fake = netD(inputv)\n D_fake = D_fake.mean()\n D_fake.backward(one)\n\n # train with gradient penalty\n gradient_penalty = calc_gradient_penalty(netD, real_data_v.data, fake.data)\n gradient_penalty.backward()\n\n # print \"gradien_penalty: \", gradient_penalty\n\n D_cost = D_fake - D_real + gradient_penalty\n Wasserstein_D = D_real - D_fake\n optimizerD.step()\n ############################\n # (2) Update G network\n ###########################\n for p in netD.parameters():\n p.requires_grad = False # to avoid computation\n netG.zero_grad()\n\n noise = torch.randn(BATCH_SIZE, 128)\n if use_cuda:\n noise = 
noise.cuda(gpu)\n noisev = autograd.Variable(noise)\n fake = netG(noisev)\n G = netD(fake)\n G = G.mean()\n G.backward(mone)\n G_cost = -G\n optimizerG.step()\n\n # Write logs and save samples\n lib.plot.plot('./tmp/cifar10/{}/train disc cost'.format(EXP), D_cost.cpu().data.numpy())\n lib.plot.plot('./tmp/cifar10/{}/time'.format(EXP), time.time() - start_time)\n lib.plot.plot('./tmp/cifar10/{}/train gen cost'.format(EXP), G_cost.cpu().data.numpy())\n lib.plot.plot('./tmp/cifar10/{}/wasserstein distance'.format(EXP), Wasserstein_D.cpu().data.numpy())\n\n # Calculate inception score every 1K iters\n if iteration % 1000 == 999:\n inception_score = get_inception_score(netG)\n lib.plot.plot('./tmp/cifar10/{}/inception score'.format(EXP), inception_score[0])\n\n # Calculate dev loss and generate samples every 100 iters\n if iteration % 100 == 99:\n dev_disc_costs = []\n for images, _ in dev_gen():\n images = images.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)\n imgs = torch.stack([preprocess(item) for item in images])\n\n # imgs = preprocess(images)\n if use_cuda:\n imgs = imgs.cuda(gpu)\n imgs_v = autograd.Variable(imgs, volatile=True)\n\n D = netD(imgs_v)\n _dev_disc_cost = -D.mean().cpu().data.numpy()\n dev_disc_costs.append(_dev_disc_cost)\n lib.plot.plot('./tmp/cifar10/{}/dev disc cost'.format(EXP), np.mean(dev_disc_costs))\n\n generate_image(iteration, netG)\n\n # Save logs every 100 iters\n if (iteration < 5) or (iteration % 100 == 99):\n lib.plot.flush()\n lib.plot.tick()\n" ]
[ [ "torch.nn.ConvTranspose2d", "numpy.multiply", "torch.randn", "torch.nn.Conv2d", "torch.nn.Tanh", "numpy.concatenate", "torch.nn.Linear", "torch.FloatTensor", "torch.rand", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "numpy.mean", "torch.nn.ReLU", "torch.autograd.Variable" ] ]
jeff41404/transformers
[ "825925dfaa5bd6f1dfb92a597ca89d69720772b6" ]
[ "src/transformers/modeling_bart.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BART model, ported from the fairseq repo.\"\"\"\nimport math\nimport random\nimport warnings\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .activations import ACT2FN\nfrom .configuration_bart import BartConfig\nfrom .file_utils import (\n add_code_sample_docstrings,\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom .modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPast,\n Seq2SeqLMOutput,\n Seq2SeqModelOutput,\n Seq2SeqQuestionAnsweringModelOutput,\n Seq2SeqSequenceClassifierOutput,\n)\nfrom .modeling_utils import PreTrainedModel\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BartConfig\"\n_TOKENIZER_FOR_DOC = \"BartTokenizer\"\n\n\nBART_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/bart-base\",\n \"facebook/bart-large\",\n \"facebook/bart-large-mnli\",\n \"facebook/bart-large-cnn\",\n \"facebook/bart-large-xsum\",\n \"facebook/mbart-large-en-ro\",\n]\n# This list is incomplete. See all BART models at https://huggingface.co/models?filter=bart\n\n\nBART_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\n\"\"\"\n\nBART_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig\n\n >>> # see ``examples/summarization/bart/run_eval.py`` for a longer example\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')\n\n >>> ARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs.\"\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])\n\n\"\"\"\n\nBART_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for translation and summarization training. By default, the model will create this tensor by\n shifting the :obj:`input_ids` to the right, following the paper.\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will\n also be used by default.\n\n If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and\n modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more\n information on the default strategy.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):\n Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:\n :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,\n `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the\n cross-attention of the decoder.\n past_key_values (:obj:`Tuple[Dict[str: tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. 
Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\ndef invert_mask(attention_mask):\n \"\"\"Turns 1->0, 0->1, False->True, True-> False\"\"\"\n assert attention_mask.dim() == 2\n return attention_mask.eq(0)\n\n\ndef _prepare_bart_decoder_inputs(\n config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32\n):\n \"\"\"\n Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided.\n This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during\n generation\n \"\"\"\n pad_token_id = config.pad_token_id\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)\n bsz, tgt_len = decoder_input_ids.size()\n if decoder_padding_mask is None:\n decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)\n else:\n decoder_padding_mask = invert_mask(decoder_padding_mask)\n if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1:\n # never mask leading token, even if it is pad\n decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1]\n tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len))\n mask = torch.arange(tmp.size(-1))\n tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0)\n causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device)\n return decoder_input_ids, decoder_padding_mask, causal_mask\n\n\nclass PretrainedBartModel(PreTrainedModel):\n config_class = BartConfig\n base_model_prefix = \"model\"\n\n def _init_weights(self, module):\n std = self.config.init_std\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, SinusoidalPositionalEmbedding):\n pass\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n @property\n def dummy_inputs(self):\n pad_token = self.config.pad_token_id\n input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)\n dummy_inputs = {\n \"attention_mask\": input_ids.ne(pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n\n\ndef _make_linear_from_emb(emb):\n vocab_size, emb_size = emb.weight.shape\n lin_layer = nn.Linear(vocab_size, emb_size, bias=False)\n lin_layer.weight.data = emb.weight.data\n return lin_layer\n\n\ndef shift_tokens_right(input_ids, pad_token_id):\n \"\"\"Shift input ids one 
token to the right, and wrap the last non pad token (usually <eos>).\"\"\"\n prev_output_tokens = input_ids.clone()\n index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)\n prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()\n prev_output_tokens[:, 1:] = input_ids[:, :-1]\n return prev_output_tokens\n\n\ndef make_padding_mask(input_ids, padding_idx=1):\n \"\"\"True for pad tokens\"\"\"\n padding_mask = input_ids.eq(padding_idx)\n if not padding_mask.any():\n padding_mask = None\n return padding_mask\n\n\n# Helper Modules\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, config: BartConfig):\n super().__init__()\n self.embed_dim = config.d_model\n self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)\n self.normalize_before = config.normalize_before\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def forward(self, x, encoder_padding_mask, output_attentions=False):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n for t_tgt, t_src is excluded (or masked out), =0 means it is\n included in attention\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n x, attn_weights = self.self_attn(\n query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if torch.isinf(x).any() or torch.isnan(x).any():\n clamp_value = torch.finfo(x.dtype).max - 1000\n x = torch.clamp(x, min=-clamp_value, max=clamp_value)\n return x, attn_weights\n\n\nclass BartEncoder(nn.Module):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. 
Each layer is a\n :class:`EncoderLayer`.\n\n Args:\n config: BartConfig\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens):\n super().__init__()\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n\n embed_dim = embed_tokens.embedding_dim\n self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = config.max_position_embeddings\n\n self.embed_tokens = embed_tokens\n if config.static_position_embeddings:\n self.embed_positions = SinusoidalPositionalEmbedding(\n config.max_position_embeddings, embed_dim, self.padding_idx\n )\n else:\n self.embed_positions = LearnedPositionalEmbedding(\n config.max_position_embeddings,\n embed_dim,\n self.padding_idx,\n config.extra_pos_embeddings,\n )\n self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])\n self.layernorm_embedding = LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()\n # mbart has one extra layer_norm\n self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None\n\n def forward(\n self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False\n ):\n \"\"\"\n Args:\n input_ids (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n attention_mask (torch.LongTensor): indicating which indices are padding tokens\n\n Returns:\n BaseModelOutput or Tuple comprised of:\n\n - **x** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)`\n - **encoder_states** (tuple(torch.FloatTensor)): all intermediate hidden states of shape `(src_len,\n batch, embed_dim)`. Only populated if *output_hidden_states:* is True.\n - **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer.\n During training might not be of length n_layers because of layer dropout.\n \"\"\"\n # check attention mask and invert\n if attention_mask is not None:\n attention_mask = invert_mask(attention_mask)\n\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n embed_pos = self.embed_positions(input_ids)\n x = inputs_embeds + embed_pos\n x = self.layernorm_embedding(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n encoder_states = [] if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states.append(x)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop): # skip the layer\n attn = None\n else:\n x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions)\n\n if output_attentions:\n all_attentions = all_attentions + (attn,)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n if output_hidden_states:\n encoder_states.append(x)\n # T x B x C -> B x T x C\n encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if not return_dict:\n return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)\n return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, config: BartConfig):\n super().__init__()\n self.embed_dim = config.d_model\n\n 
self.self_attn = Attention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n )\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.normalize_before = config.normalize_before\n\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.encoder_attn = Attention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n encoder_decoder_attention=True,\n )\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)\n self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)\n self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def forward(\n self,\n x,\n encoder_hidden_states,\n encoder_attn_mask=None,\n layer_state=None,\n causal_mask=None,\n decoder_padding_mask=None,\n output_attentions=False,\n ):\n residual = x\n\n if layer_state is None:\n layer_state = {}\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n # Self Attention\n\n x, self_attn_weights = self.self_attn(\n query=x,\n key=x,\n layer_state=layer_state, # adds keys to layer state\n key_padding_mask=decoder_padding_mask,\n attn_mask=causal_mask,\n output_attentions=output_attentions,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n # Cross attention\n residual = x\n assert self.encoder_attn.cache_key != self.self_attn.cache_key\n if self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n x, _ = self.encoder_attn(\n query=x,\n key=encoder_hidden_states,\n key_padding_mask=encoder_attn_mask,\n layer_state=layer_state, # mutates layer state\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n\n # Fully Connected\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n return (\n x,\n self_attn_weights,\n layer_state,\n ) # just self_attn weights for now, following t5, layer_state = cache for decoding\n\n\nclass BartDecoder(nn.Module):\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a :class:`DecoderLayer`\n\n Args:\n config: BartConfig\n embed_tokens (torch.nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):\n super().__init__()\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm # layernorm variant\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n self.embed_tokens = embed_tokens\n if config.static_position_embeddings:\n self.embed_positions = SinusoidalPositionalEmbedding(\n config.max_position_embeddings, config.d_model, config.pad_token_id\n )\n else:\n self.embed_positions = LearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n self.padding_idx,\n config.extra_pos_embeddings,\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config) for _ in range(config.decoder_layers)]\n ) # type: List[DecoderLayer]\n self.layernorm_embedding = LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()\n self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None\n\n def forward(\n self,\n input_ids,\n encoder_hidden_states,\n encoder_padding_mask,\n decoder_padding_mask,\n decoder_causal_mask,\n past_key_values=None,\n use_cache=False,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False,\n **unused,\n ):\n \"\"\"\n Includes several features from \"Jointly Learning to Align and Translate with Transformer Models\" (Garg et al.,\n EMNLP 2019).\n\n Args:\n input_ids (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_hidden_states: output from the encoder, used for\n encoder-side attention\n encoder_padding_mask: for ignoring pad tokens\n past_key_values (dict or None): dictionary used for storing state during generation\n\n Returns:\n BaseModelOutputWithPast or tuple:\n\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - the cache\n - hidden states\n - attentions\n \"\"\"\n if \"decoder_cached_states\" in unused:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_cached_states\")\n if \"decoder_past_key_values\" in unused:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_past_key_values\")\n\n # check attention mask and invert\n if encoder_padding_mask is not None:\n encoder_padding_mask = invert_mask(encoder_padding_mask)\n\n # embed positions\n positions = self.embed_positions(input_ids, use_cache=use_cache)\n\n if use_cache:\n input_ids = input_ids[:, -1:]\n positions = positions[:, -1:]\n\n x = self.embed_tokens(input_ids) * self.embed_scale\n if self.do_blenderbot_90_layernorm:\n x = self.layernorm_embedding(x)\n x += positions\n else:\n x += positions\n x = self.layernorm_embedding(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\n x = x.transpose(0, 1)\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else 
None\n all_self_attns = () if output_attentions else None\n next_decoder_cache: List[Dict] = []\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (x,)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop):\n continue\n\n layer_state = past_key_values[idx] if past_key_values is not None else None\n\n x, layer_self_attn, layer_past = decoder_layer(\n x,\n encoder_hidden_states,\n encoder_attn_mask=encoder_padding_mask,\n decoder_padding_mask=decoder_padding_mask,\n layer_state=layer_state,\n causal_mask=decoder_causal_mask,\n output_attentions=output_attentions,\n )\n\n if use_cache:\n next_decoder_cache.append(layer_past.copy())\n\n if output_attentions:\n all_self_attns += (layer_self_attn,)\n\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\n x = self.layer_norm(x)\n\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\n if output_hidden_states:\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\n x = x.transpose(0, 1)\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\n\n next_cache = next_decoder_cache if use_cache else None\n\n if not return_dict:\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\n return BaseModelOutputWithPast(\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\n )\n\n\ndef _reorder_buffer(attn_cache: Dict, new_order) -> Dict:\n for k, input_buffer_k in attn_cache.items():\n if input_buffer_k is not None:\n attn_cache[k] = input_buffer_k.index_select(0, new_order)\n return attn_cache\n\n\nclass Attention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n bias=True,\n encoder_decoder_attention=False, # otherwise self_attention\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.encoder_decoder_attention = encoder_decoder_attention\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.cache_key = \"encoder_decoder\" if self.encoder_decoder_attention else \"self\"\n\n def _shape(self, tensor, seq_len, bsz):\n return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n def forward(\n self,\n query,\n key: Tensor,\n key_padding_mask: Optional[Tensor] = None,\n layer_state: Optional[Dict[str, Tensor]] = None,\n attn_mask: Optional[Tensor] = None,\n output_attentions=False,\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time(SeqLen) x Batch x Channel\"\"\"\n static_kv: bool = self.encoder_decoder_attention\n tgt_len, bsz, embed_dim = query.size()\n # get here for encoder decoder cause of static_kv\n if layer_state is not None: # reuse k,v and encoder_padding_mask\n saved_state = layer_state.get(self.cache_key, {})\n if \"prev_key\" in saved_state and static_kv:\n # previous time steps are cached - no need 
to recompute key and value if they are static\n key = None\n else:\n # this branch is hit by encoder\n saved_state = None\n\n q = self.q_proj(query) * self.scaling\n if static_kv and key is None: # cross-attention with cache\n k = v = None\n elif static_kv and key is not None: # cross-attention no prev_key found in cache\n k = self.k_proj(key)\n v = self.v_proj(key)\n else: # self-attention\n k = self.k_proj(query)\n v = self.v_proj(query)\n\n q = self._shape(q, tgt_len, bsz)\n if k is not None:\n k = self._shape(k, -1, bsz)\n if v is not None:\n v = self._shape(v, -1, bsz)\n\n if saved_state:\n k, v = self._concat_saved_state(k, v, saved_state, static_kv, bsz)\n\n # Update cache\n if isinstance(layer_state, dict):\n cached_shape = (bsz, self.num_heads, -1, self.head_dim) # bsz must be first for reorder_cache\n layer_state[self.cache_key] = dict(prev_key=k.view(*cached_shape), prev_value=v.view(*cached_shape))\n\n src_len = k.size(1)\n assert key_padding_mask is None or key_padding_mask.shape == (bsz, src_len)\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)\n\n if attn_mask is not None:\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n # Note: deleted workaround to get around fork/join parallelism not supporting Optional types. on 2020/10/15\n\n if key_padding_mask is not None: # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)\n attn_weights = attn_weights.masked_fill(reshaped, float(\"-inf\"))\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n attn_weights = F.softmax(attn_weights, dim=-1)\n attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n assert v is not None\n attn_output = torch.bmm(attn_probs, v)\n assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn_output = self.out_proj(attn_output)\n if output_attentions:\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n else:\n attn_weights = None\n return attn_output, attn_weights\n\n def _concat_saved_state(self, k, v, saved_state, static_kv, bsz) -> Tuple[Tensor]:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n prev_K = saved_state[\"prev_key\"].view(bsz * self.num_heads, -1, self.head_dim)\n prev_V = saved_state[\"prev_value\"].view(bsz * self.num_heads, -1, self.head_dim)\n new_K = prev_K if static_kv else torch.cat([prev_K, k], dim=1)\n new_V = prev_V if static_kv else torch.cat([prev_V, v], dim=1)\n return new_K, new_V\n\n\nclass BartClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n # This can trivially be shared with RobertaClassificationHead\n\n def __init__(\n self,\n input_dim,\n inner_dim,\n num_classes,\n pooler_dropout,\n ):\n super().__init__()\n self.dense = nn.Linear(input_dim, inner_dim)\n self.dropout = nn.Dropout(p=pooler_dropout)\n self.out_proj = nn.Linear(inner_dim, num_classes)\n\n def forward(self, x):\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\nclass LearnedPositionalEmbedding(nn.Embedding):\n \"\"\"\n This module learns positional embeddings up to a fixed maximum size. 
Padding ids are ignored by either offsetting\n based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to\n the forward function.\n \"\"\"\n\n def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset):\n # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2\n # and adjust num_embeddings appropriately. Other models dont have this hack\n self.offset = offset\n assert padding_idx is not None\n num_embeddings += offset\n super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)\n\n def forward(self, input_ids, use_cache=False):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids.shape[:2]\n if use_cache:\n positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing\n else:\n # starts at 0, ends at 1-seq_len\n positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)\n return super().forward(positions + self.offset)\n\n\ndef LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):\n if torch.cuda.is_available():\n try:\n from apex.normalization import FusedLayerNorm\n\n return FusedLayerNorm(normalized_shape, eps, elementwise_affine)\n except ImportError:\n pass\n return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)\n\n\ndef fill_with_neg_inf(t):\n \"\"\"FP16-compatible function that fills a input_ids with -inf.\"\"\"\n return t.float().fill_(float(\"-inf\")).type_as(t)\n\n\n# Public API\ndef _get_shape(t):\n return getattr(t, \"shape\", None)\n\n\n@add_start_docstrings(\n \"The bare BART Model outputting raw hidden-states without any specific head on top.\",\n BART_START_DOCSTRING,\n)\nclass BartModel(PretrainedBartModel):\n def __init__(self, config: BartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = BartEncoder(config, self.shared)\n self.decoder = BartDecoder(config, self.shared)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs: Optional[Tuple] = None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n\n if decoder_input_ids is None:\n use_cache = False\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # make masks if user doesn't supply\n if not use_cache:\n decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(\n self.config,\n 
input_ids,\n decoder_input_ids=decoder_input_ids,\n decoder_padding_mask=decoder_attention_mask,\n causal_mask_dtype=self.shared.weight.dtype,\n )\n else:\n decoder_padding_mask, causal_mask = None, None\n\n assert decoder_input_ids is not None\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOuput when return_dict=False\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n decoder_input_ids,\n encoder_outputs[0],\n attention_mask,\n decoder_padding_mask,\n decoder_causal_mask=causal_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_output_embeddings(self):\n return _make_linear_from_emb(self.shared) # make it on the fly\n\n\n@add_start_docstrings(\n \"The BART Model with a language modeling head. 
Can be used for summarization.\", BART_START_DOCSTRING\n)\nclass BartForConditionalGeneration(PretrainedBartModel):\n base_model_prefix = \"model\"\n authorized_missing_keys = [r\"final_logits_bias\", r\"encoder\\.version\", r\"decoder\\.version\"]\n\n def __init__(self, config: BartConfig):\n super().__init__(config)\n base_model = BartModel(config)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n old_num_tokens = self.model.shared.num_embeddings\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self.model.shared = new_embeddings\n self._resize_final_logits_bias(new_num_tokens, old_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **unused,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). 
Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n\n Conditional generation example::\n\n >>> # Mask filling only works for bart-large\n >>> from transformers import BartTokenizer, BartForConditionalGeneration\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n >>> TXT = \"My friends are <mask> but they eat too many carbs.\"\n\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = logits[0, masked_index].softmax(dim=0)\n >>> values, predictions = probs.topk(5)\n\n >>> tokenizer.decode(predictions).split()\n >>> # ['good', 'great', 'all', 'really', 'very']\n \"\"\"\n if \"lm_labels\" in unused:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = unused.pop(\"lm_labels\")\n if \"decoder_cached_states\" in unused:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_cached_states\")\n if \"decoder_past_key_values\" in unused:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_past_key_values\")\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n use_cache = False\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # TODO(SS): do we need to ignore pad tokens in labels?\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs\n ):\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def adjust_logits_during_generation(self, logits, cur_len, max_length):\n if cur_len == 1 and self.config.force_bos_token_to_be_generated:\n self._force_token_id_to_be_generated(logits, self.config.bos_token_id)\n elif cur_len == max_length - 1 and self.config.eos_token_id is not None:\n self._force_token_id_to_be_generated(logits, self.config.eos_token_id)\n return logits\n\n @staticmethod\n def _force_token_id_to_be_generated(scores, token_id) -> None:\n \"\"\"force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float(\"inf\"))\"\"\"\n scores[:, [x for x in range(scores.shape[1]) if x != token_id]] = -float(\"inf\")\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = []\n for layer_past in past:\n # get the correct batch idx from decoder layer's batch dim for cross and self-attn\n layer_past_new = {\n attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()\n }\n reordered_past.append(layer_past_new)\n return reordered_past\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_output_embeddings(self):\n return _make_linear_from_emb(self.model.shared) # make it on the fly\n\n\n@add_start_docstrings(\n \"\"\"\n Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n \"\"\",\n BART_START_DOCSTRING,\n)\nclass BartForSequenceClassification(PretrainedBartModel):\n def __init__(self, config: BartConfig, **kwargs):\n super().__init__(config, **kwargs)\n self.model = BartModel(config)\n self.classification_head = BartClassificationHead(\n config.d_model,\n config.d_model,\n config.num_labels,\n config.classifier_dropout,\n )\n self.model._init_weights(self.classification_head.dense)\n self.model._init_weights(self.classification_head.out_proj)\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n x = outputs[0] # last hidden state\n eos_mask = input_ids.eq(self.config.eos_token_id)\n if len(torch.unique(eos_mask.sum(1))) > 1:\n raise ValueError(\"All examples must have the same number of <eos> tokens.\")\n sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]\n logits = self.classification_head(sentence_representation)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n BART_START_DOCSTRING,\n)\nclass BartForQuestionAnswering(PretrainedBartModel):\n def __init__(self, config):\n super().__init__(config)\n\n config.num_labels = 2\n self.num_labels = config.num_labels\n\n self.model = BartModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.model._init_weights(self.qa_outputs)\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n start_positions=None,\n end_positions=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if start_positions is not None and end_positions is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (\n start_logits,\n end_logits,\n ) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return Seq2SeqQuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\nclass SinusoidalPositionalEmbedding(nn.Embedding):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions, embedding_dim, padding_idx=None):\n super().__init__(num_positions, embedding_dim)\n if embedding_dim % 2 != 0:\n raise NotImplementedError(f\"odd embedding_dim {embedding_dim} not supported\")\n self.weight = self._init_weight(self.weight)\n\n @staticmethod\n def _init_weight(out: nn.Parameter):\n \"\"\"\n Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in\n the 2nd half of the vector. 
[dim // 2:]\n \"\"\"\n n_pos, dim = out.shape\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n out[:, 0 : dim // 2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) # This line breaks for odd n_pos\n out[:, dim // 2 :] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n out.requires_grad = False\n return out\n\n @torch.no_grad()\n def forward(self, input_ids, use_cache=False):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids.shape[:2]\n if use_cache:\n positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing\n else:\n # starts at 0, ends at 1-seq_len\n positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)\n return super().forward(positions)\n" ]
[ [ "torch.nn.functional.softmax", "torch.zeros", "torch.nn.functional.dropout", "torch.cat", "torch.nn.Embedding", "torch.tanh", "torch.no_grad", "torch.cuda.is_available", "torch.finfo", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.tensor", "numpy.sin", "torch.bmm", "torch.arange", "torch.nn.functional.linear", "torch.isinf", "numpy.power", "torch.nn.Linear", "torch.isnan", "numpy.cos", "torch.nn.LayerNorm", "torch.nn.Identity", "torch.clamp" ] ]
jfreissmann/tespy
[ "70bf8da9fd8521a1177613a894829cd1fa78c663" ]
[ "src/tespy/networks/network.py" ]
[ "# -*- coding: utf-8\n\n\"\"\"Module for tespy network class.\n\nThe network is the container for every TESPy simulation. The network class\nautomatically creates the system of equations describing topology and\nparametrization of a specific model and solves it.\n\n\nThis file is part of project TESPy (github.com/oemof/tespy). It's copyrighted\nby the contributors recorded in the version control history of the file,\navailable from its original location tespy/networks/networks.py\n\nSPDX-License-Identifier: MIT\n\"\"\"\nimport ast\nimport json\nimport logging\nimport os\nfrom collections import Counter\nfrom collections import OrderedDict\nfrom time import time\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.linalg import norm\nfrom tabulate import tabulate\n\nfrom tespy import connections as con\nfrom tespy.tools import fluid_properties as fp\nfrom tespy.tools import helpers as hlp\nfrom tespy.tools.data_containers import ComponentCharacteristicMaps as dc_cm\nfrom tespy.tools.data_containers import ComponentCharacteristics as dc_cc\nfrom tespy.tools.data_containers import ComponentProperties as dc_cp\nfrom tespy.tools.data_containers import DataContainerSimple as dc_simple\nfrom tespy.tools.data_containers import GroupedComponentCharacteristics as dc_gcc\nfrom tespy.tools.data_containers import GroupedComponentProperties as dc_gcp\nfrom tespy.tools.global_vars import err\nfrom tespy.tools.global_vars import fluid_property_data as fpd\n\n# Only require cupy if Cuda shall be used\ntry:\n import cupy as cu\nexcept ModuleNotFoundError:\n cu = None\n\n\nclass Network:\n r\"\"\"\n Class component is the base class of all TESPy components.\n\n Parameters\n ----------\n fluids : list\n A list of all fluids within the network container.\n\n memorise_fluid_properties : boolean\n Activate or deactivate fluid property value memorization. Default\n state is activated (:code:`True`).\n\n h_range : list\n List with minimum and maximum values for enthalpy value range.\n\n h_unit : str\n Specify the unit for enthalpy: 'J / kg', 'kJ / kg', 'MJ / kg'.\n\n iterinfo : boolean\n Print convergence progress to console.\n\n m_range : list\n List with minimum and maximum values for mass flow value range.\n\n m_unit : str\n Specify the unit for mass flow: 'kg / s', 't / h'.\n\n p_range : list\n List with minimum and maximum values for pressure value range.\n\n p_unit : str\n Specify the unit for pressure: 'Pa', 'psi', 'bar', 'MPa'.\n\n s_unit : str\n Specify the unit for specific entropy: 'J / kgK', 'kJ / kgK',\n 'MJ / kgK'.\n\n T_unit : str\n Specify the unit for temperature: 'K', 'C', 'F', 'R'.\n\n v_unit : str\n Specify the unit for volumetric flow: 'm3 / s', 'm3 / h', 'l / s',\n 'l / h'.\n\n vol_unit : str\n Specify the unit for specific volume: 'm3 / kg', 'l / kg'.\n\n x_unit : str\n Specify the unit for steam mass fraction: '-', '%'.\n\n Note\n ----\n Unit specification is optional: If not specified the SI unit (first\n element in above lists) will be applied!\n\n Range specification is optional, too. The value range is used to stabilize\n the newton algorithm. For more information see the \"getting started\"\n section in the online-documentation.\n\n Example\n -------\n Basic example for a setting up a tespy.networks.network.Network object. Specifying\n the fluids is mandatory! Unit systems, fluid property range and iterinfo\n are optional.\n\n Standard value for iterinfo is :code:`True`. This will print out\n convergence progress to the console. 
You can stop the printouts by setting\n this property to :code:`False`.\n\n >>> from tespy.networks import Network\n >>> fluid_list = ['water', 'air', 'R134a']\n >>> mynetwork = Network(fluids=fluid_list, p_unit='bar', T_unit='C')\n >>> mynetwork.set_attr(p_range=[1, 10])\n >>> type(mynetwork)\n <class 'tespy.networks.network.Network'>\n >>> mynetwork.set_attr(iterinfo=False)\n >>> mynetwork.iterinfo\n False\n >>> mynetwork.set_attr(iterinfo=True)\n >>> mynetwork.iterinfo\n True\n\n A simple network consisting of a source, a pipe and a sink. This example\n shows how the printout parameter can be used. We specify\n :code:`printout=False` for both connections, the pipe as well as the heat\n bus. Therefore the :code:`.print_results()` method should not print any\n results.\n\n >>> from tespy.networks import Network\n >>> from tespy.components import Source, Sink, Pipe\n >>> from tespy.connections import Connection, Bus\n >>> nw = Network(['CH4'], T_unit='C', p_unit='bar', v_unit='m3 / s')\n >>> so = Source('source')\n >>> si = Sink('sink')\n >>> p = Pipe('pipe', Q=0, pr=0.95, printout=False)\n >>> a = Connection(so, 'out1', p, 'in1')\n >>> b = Connection(p, 'out1', si, 'in1')\n >>> nw.add_conns(a, b)\n >>> a.set_attr(fluid={'CH4': 1}, T=30, p=10, m=10, printout=False)\n >>> b.set_attr(printout=False)\n >>> b = Bus('heat bus')\n >>> b.add_comps({'comp': p})\n >>> nw.add_busses(b)\n >>> b.set_attr(printout=False)\n >>> nw.set_attr(iterinfo=False)\n >>> nw.solve('design')\n >>> nw.print_results()\n \"\"\"\n\n def __init__(self, fluids, memorise_fluid_properties=True, **kwargs):\n\n # fluid list and constants\n if isinstance(fluids, list):\n self.fluids = sorted(fluids)\n else:\n msg = ('Please provide a list containing the network\\'s fluids on '\n 'creation.')\n logging.error(msg)\n raise TypeError(msg)\n\n self.set_defaults()\n self.set_fluid_back_ends(memorise_fluid_properties)\n self.set_attr(**kwargs)\n\n def set_defaults(self):\n \"\"\"Set default network properties.\"\"\"\n # connection dataframe\n self.conns = pd.DataFrame(\n columns=['object', 'source', 'source_id', 'target', 'target_id'],\n dtype='object')\n # user defined function dictionary for fast access\n self.user_defined_eq = {}\n # bus dictionary\n self.busses = OrderedDict()\n # results and specification dictionary\n self.results = {}\n self.specifications = {}\n\n # in case of a design calculation after an offdesign calculation\n self.redesign = False\n\n self.checked = False\n self.design_path = None\n self.iterinfo = True\n\n msg = 'Default unit specifications:\\n'\n for prop, data in fpd.items():\n # standard unit set\n self.__dict__.update({prop + '_unit': data['SI_unit']})\n msg += data['text'] + ': ' + data['SI_unit'] + '\\n'\n\n # don't need the last newline\n logging.debug(msg[:-1])\n\n # generic value range\n self.m_range_SI = np.array([-1e12, 1e12])\n self.p_range_SI = np.array([2e2, 300e5])\n self.h_range_SI = np.array([1e3, 7e6])\n\n for prop in ['m', 'p', 'h']:\n limits = self.get_attr(prop + '_range_SI')\n msg = (\n 'Default ' + fpd[prop]['text'] + ' limits\\n'\n 'min: ' + str(limits[0]) + ' ' +\n self.get_attr(prop + '_unit') + '\\n'\n 'max: ' + str(limits[1]) + ' ' + self.get_attr(prop + '_unit'))\n logging.debug(msg)\n\n def set_fluid_back_ends(self, memorise_fluid_properties):\n \"\"\"Set the fluid back ends.\"\"\"\n # this must be ordered as the fluid property memorisation calls\n # the mass fractions of the different fluids as keys in a given order.\n self.fluids_backends = OrderedDict()\n\n msg = 'Network 
fluids are: '\n i = 0\n for f in self.fluids:\n try:\n data = f.split('::')\n backend = data[0]\n fluid = data[1]\n except IndexError:\n backend = 'HEOS'\n fluid = f\n\n self.fluids_backends[fluid] = backend\n self.fluids[i] = fluid\n\n msg += fluid + ', '\n i += 1\n\n msg = msg[:-2] + '.'\n logging.debug(msg)\n\n # initialise fluid property memorisation function for this network\n fp.Memorise.add_fluids(self.fluids_backends, memorise_fluid_properties)\n\n # set up results dataframe for connections\n cols = (\n ['m', 'p', 'h', 'T', 'v', 'vol', 's', 'x', 'Td_bp'] + self.fluids)\n self.results['Connection'] = pd.DataFrame(\n columns=cols, dtype='float64')\n self.specifications['Connection'] = pd.DataFrame(\n columns=cols, dtype='bool')\n self.specifications['Ref'] = pd.DataFrame(\n columns=cols, dtype='bool')\n self.specifications['lookup'] = {\n 'properties': 'prop_specifications',\n 'chars': 'char_specifications',\n 'variables': 'var_specifications',\n 'groups': 'group_specifications'\n }\n\n def set_attr(self, **kwargs):\n r\"\"\"\n Set, resets or unsets attributes of a network.\n\n Parameters\n ----------\n h_range : list\n List with minimum and maximum values for enthalpy value range.\n\n h_unit : str\n Specify the unit for enthalpy: 'J / kg', 'kJ / kg', 'MJ / kg'.\n\n iterinfo : boolean\n Print convergence progress to console.\n\n m_range : list\n List with minimum and maximum values for mass flow value range.\n\n m_unit : str\n Specify the unit for mass flow: 'kg / s', 't / h'.\n\n p_range : list\n List with minimum and maximum values for pressure value range.\n\n p_unit : str\n Specify the unit for pressure: 'Pa', 'psi', 'bar', 'MPa'.\n\n s_unit : str\n Specify the unit for specific entropy: 'J / kgK', 'kJ / kgK',\n 'MJ / kgK'.\n\n T_unit : str\n Specify the unit for temperature: 'K', 'C', 'F', 'R'.\n\n v_unit : str\n Specify the unit for volumetric flow: 'm3 / s', 'm3 / h', 'l / s',\n 'l / h'.\n\n vol_unit : str\n Specify the unit for specific volume: 'm3 / kg', 'l / kg'.\n \"\"\"\n # unit sets\n for prop in fpd.keys():\n unit = prop + '_unit'\n if unit in kwargs:\n if kwargs[unit] in fpd[prop]['units']:\n self.__dict__.update({unit: kwargs[unit]})\n msg = (\n 'Setting ' + fpd[prop]['text'] +\n ' unit: ' + kwargs[unit] + '.')\n logging.debug(msg)\n else:\n keys = ', '.join(fpd[prop]['units'].keys())\n msg = (\n 'Allowed units for ' +\n fpd[prop]['text'] + ' are: ' + keys)\n logging.error(msg)\n raise ValueError(msg)\n\n for prop in ['m', 'p', 'h']:\n if prop + '_range' in kwargs:\n if isinstance(kwargs[prop + '_range'], list):\n self.__dict__.update(\n {prop + '_range_SI': hlp.convert_to_SI(\n prop, np.array(kwargs[prop + '_range']),\n self.get_attr(prop + '_unit'))})\n else:\n msg = (\n 'Specify the value range as list: [' + prop +\n '_min, ' + prop + '_max]')\n logging.error(msg)\n raise TypeError(msg)\n\n limits = self.get_attr(prop + '_range_SI')\n msg = (\n 'Setting ' + fpd[prop]['text'] +\n ' limits\\nmin: ' + str(limits[0]) + ' ' +\n self.get_attr(prop + '_unit') + '\\n'\n 'max: ' + str(limits[1]) + ' ' +\n self.get_attr(prop + '_unit'))\n logging.debug(msg)\n\n # update non SI value ranges\n for prop in ['m', 'p', 'h']:\n self.__dict__.update({\n prop + '_range': hlp.convert_from_SI(\n prop, self.get_attr(prop + '_range_SI'),\n self.get_attr(prop + '_unit')\n )\n })\n\n self.iterinfo = kwargs.get('iterinfo', self.iterinfo)\n\n if not isinstance(self.iterinfo, bool):\n msg = ('Network parameter iterinfo must be True or False!')\n logging.error(msg)\n raise TypeError(msg)\n\n 
def get_attr(self, key):\n r\"\"\"\n Get the value of a network attribute.\n\n Parameters\n ----------\n key : str\n The attribute you want to retrieve.\n\n Returns\n -------\n out :\n Specified attribute.\n \"\"\"\n if key in self.__dict__:\n return self.__dict__[key]\n else:\n msg = 'Network has no attribute \\\"' + str(key) + '\\\".'\n logging.error(msg)\n raise KeyError(msg)\n\n def add_subsys(self, *args):\n r\"\"\"\n Add one or more subsystems to the network.\n\n Parameters\n ----------\n c : tespy.components.subsystem.Subsystem\n The subsystem to be added to the network, subsystem objects si\n :code:`network.add_subsys(s1, s2, s3, ...)`.\n \"\"\"\n for subsys in args:\n for c in subsys.conns.values():\n self.add_conns(c)\n\n def get_conn(self, label):\n r\"\"\"\n Get Connection via label.\n\n Parameters\n ----------\n label : str\n Label of the Connection object.\n\n Returns\n -------\n c : tespy.connections.connection.Connection\n Connection object with specified label, None if no Connection of\n the network has this label.\n \"\"\"\n try:\n return self.conns.loc[label, 'object']\n except KeyError:\n logging.warning('Connection with label ' + label + ' not found.')\n return None\n\n def get_comp(self, label):\n r\"\"\"\n Get Component via label.\n\n Parameters\n ----------\n label : str\n Label of the Component object.\n\n Returns\n -------\n c : tespy.components.component.Component\n Component object with specified label, None if no Component of\n the network has this label.\n \"\"\"\n try:\n return self.comps.loc[label, 'object']\n except KeyError:\n logging.warning('Component with label ' + label + ' not found.')\n return None\n\n def add_conns(self, *args):\n r\"\"\"\n Add one or more connections to the network.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n The connection to be added to the network, connections objects ci\n :code:`add_conns(c1, c2, c3, ...)`.\n \"\"\"\n for c in args:\n if not isinstance(c, con.Connection):\n msg = ('Must provide tespy.connections.connection.Connection '\n 'objects as parameters.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif c.label in self.conns.index:\n msg = (\n 'There is already a connection with the label ' +\n c.label + '. 
The connection labels must be unique!')\n logging.error(msg)\n raise ValueError(msg)\n\n c.good_starting_values = False\n\n self.conns.loc[c.label] = [\n c, c.source, c.source_id, c.target, c.target_id]\n\n self.results['Connection'].loc[c.label] = np.nan\n\n msg = 'Added connection ' + c.label + ' to network.'\n logging.debug(msg)\n # set status \"checked\" to false, if connection is added to network.\n self.checked = False\n\n def del_conns(self, *args):\n \"\"\"\n Remove one or more connections from the network.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n The connection to be removed from the network, connections objects\n ci :code:`del_conns(c1, c2, c3, ...)`.\n \"\"\"\n for c in args:\n self.conns.drop(c.label, inplace=True)\n self.results['Connection'].drop(c.label, inplace=True)\n msg = ('Deleted connection ' + c.label + ' from network.')\n logging.debug(msg)\n # set status \"checked\" to false, if connection is deleted from network.\n self.checked = False\n\n def check_conns(self):\n r\"\"\"Check connections for multiple usage of inlets or outlets.\"\"\"\n dub = self.conns.loc[\n self.conns.duplicated(['source', 'source_id']) == True] # noqa: E712\n for c in dub['object']:\n targets = ''\n for conns in self.conns[\n (self.conns['source'] == c.source) &\n (self.conns['source_id'] == c.source_id)]['object']:\n targets += conns.target.label + ' (' + conns.target_id + '); '\n\n msg = (\n 'The source ' + c.source.label + ' (' + c.source_id +\n ') is attached '\n 'to more than one target: ' + targets[:-2] + '. '\n 'Please check your network.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n dub = self.conns.loc[\n self.conns.duplicated(['target', 'target_id']) == True] # noqa: E712\n for c in dub['object']:\n sources = ''\n for conns in self.conns[\n (self.conns['target'] == c.target) &\n (self.conns['target_id'] == c.target_id)]['object']:\n sources += conns.source.label + ' (' + conns.source_id + '); '\n\n msg = (\n 'The target ' + c.target.label + ' (' + c.target_id +\n ') is attached to more than one source: ' + sources[:-2] + '. '\n 'Please check your network.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n def add_ude(self, *args):\n r\"\"\"\n Add a user defined function to the network.\n\n Parameters\n ----------\n c : tespy.tools.helpers.UserDefinedEquation\n The objects to be added to the network, UserDefinedEquation objects\n ci :code:`del_conns(c1, c2, c3, ...)`.\n \"\"\"\n for c in args:\n if not isinstance(c, hlp.UserDefinedEquation):\n msg = ('Must provide tespy.connections.connection.Connection '\n 'objects as parameters.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif c.label in self.user_defined_eq:\n msg = (\n 'There is already a UserDefinedEquation with the label ' +\n c.label + '. 
The UserDefinedEquation labels must be '\n 'unique within a network')\n logging.error(msg)\n raise ValueError(msg)\n\n self.user_defined_eq[c.label] = c\n msg = 'Added UserDefinedEquation ' + c.label + ' to network.'\n logging.debug(msg)\n\n def del_ude(self, *args):\n \"\"\"\n Remove a user defined function from the network.\n\n Parameters\n ----------\n c : tespy.tools.helpers.UserDefinedEquation\n The objects to be added deleted from the network,\n UserDefinedEquation objects ci :code:`del_conns(c1, c2, c3, ...)`.\n \"\"\"\n for c in args:\n del self.user_defined_eq[c.label]\n msg = 'Deleted UserDefinedEquation ' + c.label + ' from network.'\n logging.debug(msg)\n\n def add_busses(self, *args):\n r\"\"\"\n Add one or more busses to the network.\n\n Parameters\n ----------\n b : tespy.connections.bus.Bus\n The bus to be added to the network, bus objects bi\n :code:`add_busses(b1, b2, b3, ...)`.\n \"\"\"\n for b in args:\n if self.check_busses(b):\n self.busses[b.label] = b\n msg = 'Added bus ' + b.label + ' to network.'\n logging.debug(msg)\n\n self.results[b.label] = pd.DataFrame(\n columns=[\n 'component value', 'bus value', 'efficiency',\n 'design value'],\n dtype='float64')\n\n def del_busses(self, *args):\n r\"\"\"\n Remove one or more busses from the network.\n\n Parameters\n ----------\n b : tespy.connections.bus.Bus\n The bus to be removed from the network, bus objects bi\n :code:`add_busses(b1, b2, b3, ...)`.\n \"\"\"\n for b in args:\n if b in self.busses.values():\n del self.busses[b.label]\n msg = 'Deleted bus ' + b.label + ' from network.'\n logging.debug(msg)\n\n del self.results[b.label]\n\n def check_busses(self, b):\n r\"\"\"\n Checksthe busses to be added for type, duplicates and identical labels.\n\n Parameters\n ----------\n b : tespy.connections.bus.Bus\n The bus to be checked.\n \"\"\"\n if isinstance(b, con.Bus):\n if len(self.busses) > 0:\n if b in self.busses.values():\n msg = ('Network contains the bus ' + b.label + ' (' +\n str(b) + ') already.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n elif b.label in self.busses:\n msg = ('Network already has a bus with the name ' +\n b.label + '.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n else:\n return True\n else:\n return True\n else:\n msg = 'Only objects of type bus are allowed in *args.'\n logging.error(msg)\n raise TypeError(msg)\n\n def check_network(self):\n r\"\"\"Check if components are connected properly within the network.\"\"\"\n if len(self.conns) == 0:\n msg = (\n 'No connections have been added to the network, please make '\n 'sure to add your connections with the .add_conns() method.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n if len(self.fluids) == 0:\n msg = ('Network has no fluids, please specify a list with fluids '\n 'on network creation.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n self.check_conns()\n # get unique components in connections dataframe\n comps = pd.unique(self.conns[['source', 'target']].values.ravel())\n # build the dataframe for components\n self.init_components(comps)\n # count number of incoming and outgoing connections and compare to\n # expected values\n for comp in self.comps['object']:\n num_o = (self.conns[['source', 'target']] == comp).sum().source\n num_i = (self.conns[['source', 'target']] == comp).sum().target\n if num_o != comp.num_o:\n msg = (\n comp.label + ' is missing ' + str(comp.num_o - num_o) + ' '\n 'outgoing connections. 
Make sure all outlets are connected'\n ' and all connections have been added to the network.')\n logging.error(msg)\n # raise an error in case network check is unsuccesful\n raise hlp.TESPyNetworkError(msg)\n elif num_i != comp.num_i:\n msg = (\n comp.label + ' is missing ' + str(comp.num_i - num_i) + ' '\n 'incoming connections. Make sure all inlets are connected '\n 'and all connections have been added to the network.')\n logging.error(msg)\n # raise an error in case network check is unsuccesful\n raise hlp.TESPyNetworkError(msg)\n\n # network checked\n self.checked = True\n msg = 'Networkcheck successful.'\n logging.info(msg)\n\n def init_components(self, comps):\n r\"\"\"\n Set up a dataframe for the network's components.\n\n Additionally, check, if all components have unique labels.\n\n Parameters\n ----------\n comps : pandas.core.frame.DataFrame\n DataFrame containing all components of the network gathered from\n the network's connection information.\n\n Note\n ----\n The dataframe for the components is derived from the network's\n connections. Thus it does not hold any additional information, the\n dataframe is used to simplify the code, only.\n \"\"\"\n self.comps = pd.DataFrame(dtype='object')\n\n labels = []\n for comp in comps:\n # this is required for printing and saving\n comp_type = comp.__class__.__name__\n self.comps.loc[comp, 'comp_type'] = comp_type\n self.comps.loc[comp, 'label'] = comp.label\n # get incoming and outgoing connections of a component\n sources = self.conns[self.conns['source'] == comp]\n sources = sources['source_id'].sort_values().index.tolist()\n targets = self.conns[self.conns['target'] == comp]\n targets = targets['target_id'].sort_values().index.tolist()\n # save the incoming and outgoing as well as the number of\n # connections as component attribute\n comp.inl = self.conns.loc[targets, 'object'].tolist()\n comp.outl = self.conns.loc[sources, 'object'].tolist()\n comp.num_i = len(comp.inlets())\n comp.num_o = len(comp.outlets())\n labels += [comp.label]\n\n # save the connection locations to the components\n comp.conn_loc = []\n for c in comp.inl + comp.outl:\n comp.conn_loc += [self.conns.index.get_loc(c.label)]\n\n if comp_type not in self.results:\n cols = [col for col, data in comp.variables.items()\n if isinstance(data, dc_cp)]\n self.results[comp_type] = pd.DataFrame(\n columns=cols, dtype='float64')\n if comp_type not in self.specifications:\n cols, groups, chars = [], [], []\n for col, data in comp.variables.items():\n if isinstance(data, dc_cp):\n cols += [col]\n elif isinstance(data, dc_gcp) or isinstance(data, dc_gcc):\n groups += [col]\n elif isinstance(data, dc_cc) or isinstance(data, dc_cm):\n chars += [col]\n self.specifications[comp_type] = {\n 'groups': pd.DataFrame(columns=groups, dtype='bool'),\n 'chars': pd.DataFrame(columns=chars, dtype='object'),\n 'variables': pd.DataFrame(columns=cols, dtype='bool'),\n 'properties': pd.DataFrame(columns=cols, dtype='bool')\n }\n\n self.comps = self.comps.reset_index().set_index('label')\n self.comps.rename(columns={'index': 'object'}, inplace=True)\n\n # check for duplicates in the component labels\n if len(labels) != len(list(set(labels))):\n duplicates = [\n item for item, count in Counter(labels).items() if count > 1]\n msg = ('All Components must have unique labels, duplicate labels '\n 'are: \"' + '\", \"'.join(duplicates) + '\".')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n def initialise(self):\n r\"\"\"\n Initilialise the network depending on calclation mode.\n\n 
Design\n\n - Generic fluid composition and fluid property initialisation.\n - Starting values from initialisation path if provided.\n\n Offdesign\n\n - Check offdesign path specification.\n - Set component and connection design point properties.\n - Switch from design/offdesign parameter specification.\n \"\"\"\n # keep track of the number of bus, component and connection equations\n # as well as number of component variables\n self.num_bus_eq = 0\n self.num_comp_eq = 0\n self.num_conn_eq = 0\n self.num_comp_vars = 0\n self.init_set_properties()\n\n if self.mode == 'offdesign':\n self.redesign = True\n if self.design_path is None:\n # must provide design_path\n msg = ('Please provide \"design_path\" for every offdesign '\n 'calculation.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n # load design case\n if self.new_design:\n self.init_offdesign_params()\n\n self.init_offdesign()\n\n else:\n # reset any preceding offdesign calculation\n self.init_design()\n # generic fluid initialisation\n # for offdesign cases good starting values should be available\n self.init_fluids()\n\n # generic fluid property initialisation\n self.init_properties()\n\n msg = 'Network initialised.'\n logging.info(msg)\n\n def init_set_properties(self):\n \"\"\"Specification of SI values for user set values.\"\"\"\n # fluid property values\n for c in self.conns['object']:\n self.specifications['Connection'].loc[c.label] = False\n if not self.init_previous:\n c.good_starting_values = False\n\n c.conn_loc = self.conns.index.get_loc(c.label)\n\n for key in ['m', 'p', 'h', 'T', 'x', 'v', 'Td_bp', 'vol', 's']:\n # read unit specifications\n if key == 'Td_bp':\n c.get_attr(key).unit = self.get_attr('T_unit')\n else:\n c.get_attr(key).unit = self.get_attr(key + '_unit')\n # set SI value\n if c.get_attr(key).val_set:\n self.specifications['Connection'].loc[c.label, key] = True\n c.get_attr(key).val_SI = hlp.convert_to_SI(\n key, c.get_attr(key).val, c.get_attr(key).unit)\n if c.get_attr(key).ref_set:\n self.specifications['Ref'].loc[c.label, key] = True\n if key == 'T':\n c.get_attr(key).ref.delta_SI = hlp.convert_to_SI(\n 'Td_bp', c.get_attr(key).ref.delta,\n c.get_attr(key).unit)\n else:\n c.get_attr(key).ref.delta_SI = hlp.convert_to_SI(\n key, c.get_attr(key).ref.delta,\n c.get_attr(key).unit)\n\n # fluid vector specification\n tmp = c.fluid.val\n for fluid in tmp.keys():\n if fluid not in self.fluids:\n msg = ('Your connection ' + c.label + ' holds a fluid, '\n 'that is not part of the networks\\'s fluids (' +\n fluid + ').')\n raise hlp.TESPyNetworkError(msg)\n tmp0 = c.fluid.val0\n tmp_set = c.fluid.val_set\n self.specifications['Connection'].loc[\n c.label, 'balance'] = c.fluid.balance\n\n # enter fluid specifications into specification DataFrame\n for fluid in self.fluids:\n try:\n self.specifications['Connection'].loc[c.label, fluid] = (\n c.fluid.val_set[fluid])\n except KeyError:\n pass\n\n c.fluid.val = OrderedDict()\n c.fluid.val0 = OrderedDict()\n c.fluid.val_set = OrderedDict()\n\n # if the number of fluids is one the mass fraction is 1 for every\n # connection\n if len(self.fluids) == 1:\n c.fluid.val[self.fluids[0]] = 1\n c.fluid.val0[self.fluids[0]] = 1\n if self.fluids[0] in tmp_set:\n c.fluid.val_set[self.fluids[0]] = tmp_set[self.fluids[0]]\n else:\n c.fluid.val_set[self.fluids[0]] = False\n\n # jump to next connection\n continue\n\n for fluid in self.fluids:\n # take over values from temporary dicts\n if fluid in tmp and fluid in tmp_set:\n c.fluid.val[fluid] = tmp[fluid]\n 
c.fluid.val0[fluid] = tmp[fluid]\n c.fluid.val_set[fluid] = tmp_set[fluid]\n # take over starting values\n elif fluid in tmp0:\n if fluid not in tmp_set:\n c.fluid.val[fluid] = tmp0[fluid]\n c.fluid.val0[fluid] = tmp0[fluid]\n c.fluid.val_set[fluid] = False\n # if fluid not in keys\n else:\n c.fluid.val[fluid] = 0\n c.fluid.val0[fluid] = 0\n c.fluid.val_set[fluid] = False\n\n msg = (\n 'Updated fluid property SI values and fluid mass fraction for '\n 'user specified connection parameters.')\n logging.debug(msg)\n\n def init_design(self):\n r\"\"\"\n Initialise a design calculation.\n\n Offdesign parameters are unset, design parameters are set. If\n :code:`local_offdesign` is :code:`True` for connections or components,\n the design point information are read from the .csv-files in the\n respective :code:`design_path`. In this case, the design values are\n unset, the offdesign values set.\n \"\"\"\n # connections\n for c in self.conns['object']:\n # read design point information of connections with\n # local_offdesign activated from their respective design path\n if c.local_offdesign:\n if c.design_path is None:\n msg = (\n 'The parameter local_offdesign is True for the '\n 'connection ' + c.label + ', an individual '\n 'design_path must be specified in this case!')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n # unset design parameters\n for var in c.design:\n c.get_attr(var).val_set = False\n # set offdesign parameters\n for var in c.offdesign:\n c.get_attr(var).val_set = True\n\n # read design point information\n df = self.init_read_connections(c.design_path)\n msg = (\n 'Reading individual design point information for '\n 'connection ' + c.label + ' from path ' + c.design_path +\n 'connections.')\n logging.debug(msg)\n\n # write data to connections\n self.init_conn_design_params(c, df)\n\n else:\n # unset all design values\n c.m.design = np.nan\n c.p.design = np.nan\n c.h.design = np.nan\n c.fluid.design = OrderedDict()\n\n c.new_design = True\n\n # switch connections to design mode\n if self.redesign:\n for var in c.design:\n c.get_attr(var).val_set = True\n\n for var in c.offdesign:\n c.get_attr(var).val_set = False\n\n # unset design values for busses, count bus equations and\n # reindex bus dictionary\n for b in self.busses.values():\n self.busses[b.label] = b\n self.num_bus_eq += b.P.is_set * 1\n for cp in b.comps.index:\n b.comps.loc[cp, 'P_ref'] = np.nan\n\n series = pd.Series(dtype='float64')\n for cp in self.comps['object']:\n # read design point information of components with\n # local_offdesign activated from their respective design path\n if cp.local_offdesign:\n if cp.design_path is not None:\n # get type of component (class name)\n c = cp.__class__.__name__\n # read design point information\n path = hlp.modify_path_os(\n cp.design_path + '/components/' + c + '.csv')\n df = pd.read_csv(\n path, sep=';', decimal='.', converters={\n 'busses': ast.literal_eval,\n 'bus_P_ref': ast.literal_eval})\n df.set_index('label', inplace=True)\n # write data\n self.init_comp_design_params(cp, df.loc[cp.label])\n\n # unset design parameters\n for var in cp.design:\n cp.get_attr(var).is_set = False\n\n # set offdesign parameters\n switched = False\n msg = 'Set component attributes '\n\n for var in cp.offdesign:\n # set variables provided in .offdesign attribute\n data = cp.get_attr(var)\n data.is_set = True\n\n # take nominal values from design point\n if isinstance(data, dc_cp):\n cp.get_attr(var).val = cp.get_attr(var).design\n switched = True\n msg += var + ', '\n\n if 
switched:\n msg = (msg[:-2] + ' to design value at component ' +\n cp.label + '.')\n logging.debug(msg)\n\n cp.new_design = False\n\n else:\n # switch connections to design mode\n if self.redesign:\n for var in cp.design:\n cp.get_attr(var).is_set = True\n\n for var in cp.offdesign:\n cp.get_attr(var).is_set = False\n\n cp.set_parameters(self.mode, series)\n\n # component initialisation\n cp.comp_init(self)\n ct = cp.__class__.__name__\n for spec in self.specifications[ct].keys():\n if len(cp.get_attr(self.specifications['lookup'][spec])) > 0:\n self.specifications[ct][spec].loc[cp.label] = (\n cp.get_attr(self.specifications['lookup'][spec]))\n\n # count number of component equations and variables\n self.num_comp_vars += cp.num_vars\n self.num_comp_eq += cp.num_eq\n\n def init_offdesign_params(self):\n r\"\"\"\n Read design point information from specified :code:`design_path`.\n\n If a :code:`design_path` has been specified individually for components\n or connections, the data will be read from the specified individual\n path instead.\n\n Note\n ----\n The methods\n :py:meth:`tespy.networks.network.Network.init_comp_design_params`\n (components) and the\n :py:meth:`tespy.networks.network.Network.init_conn_design_params`\n (connections) handle the parameter specification.\n \"\"\"\n # components without any parameters\n not_required = [\n 'source', 'sink', 'node', 'merge', 'splitter', 'separator', 'drum',\n 'subsystem_interface', 'droplet_separator']\n # fetch all components, reindex with label\n df_comps = self.comps.copy()\n df_comps = df_comps[~df_comps['comp_type'].isin(not_required)]\n\n # iter through unique types of components (class names)\n for c in df_comps['comp_type'].unique():\n path = hlp.modify_path_os(\n self.design_path + '/components/' + c + '.csv')\n msg = (\n 'Reading design point information for components of type '\n + c + ' from path ' + path + '.')\n logging.debug(msg)\n\n # read data\n df = pd.read_csv(\n path, sep=';', decimal='.', converters={\n 'busses': ast.literal_eval,\n 'bus_P_ref': ast.literal_eval})\n df.set_index('label', inplace=True)\n # iter through all components of this type and set data\n for c_label in df.index:\n comp = df_comps.loc[c_label, 'object']\n # read data of components with individual design_path\n if comp.design_path is not None:\n path_c = hlp.modify_path_os(\n comp.design_path + '/components/' + c + '.csv')\n df_c = pd.read_csv(\n path_c, sep=';', decimal='.', converters={\n 'busses': ast.literal_eval,\n 'bus_P_ref': ast.literal_eval})\n df_c.set_index('label', inplace=True)\n data = df_c.loc[comp.label]\n\n else:\n data = df.loc[comp.label]\n\n # write data to components\n self.init_comp_design_params(comp, data)\n\n msg = 'Done reading design point information for components.'\n logging.debug(msg)\n\n # read connection design point information\n df = self.init_read_connections(self.design_path)\n msg = (\n 'Reading design point information for connections from path ' +\n self.design_path + '/connections.csv.')\n logging.debug(msg)\n\n # iter through connections\n for c in self.conns['object']:\n\n # read data of connections with individual design_path\n if c.design_path is not None:\n df_c = self.init_read_connections(c.design_path)\n msg = (\n 'Reading individual design point information for '\n 'connection ' + c.label + ' from path ' + c.design_path +\n '/connections.csv.')\n logging.debug(msg)\n\n # write data\n self.init_conn_design_params(c, df_c)\n\n else:\n # write data\n self.init_conn_design_params(c, df)\n\n msg = 'Done 
reading design point information for connections.'\n logging.debug(msg)\n\n def init_comp_design_params(self, component, data):\n r\"\"\"\n Write design point information to components.\n\n Parameters\n ----------\n component : tespy.components.component.Component\n Write design point information to this component.\n\n data : pandas.core.series.Series, pandas.core.frame.DataFrame\n Design point information.\n \"\"\"\n # write component design data\n component.set_parameters(self.mode, data)\n # write design values to busses\n i = 0\n for b in data.busses:\n bus = self.busses[b].comps\n bus.loc[component, 'P_ref'] = data['bus_P_ref'][i]\n i += 1\n\n def init_conn_design_params(self, c, df):\n r\"\"\"\n Write design point information to connections.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Write design point information to this connection.\n\n df : pandas.core.frame.DataFrame\n Dataframe containing design point information.\n \"\"\"\n # match connection (source, source_id, target, target_id) on\n # connection objects of design file\n conn = df.loc[\n df['source'].isin([c.source.label]) &\n df['target'].isin([c.target.label]) &\n df['source_id'].isin([c.source_id]) &\n df['target_id'].isin([c.target_id])]\n\n try:\n # read connection information\n conn_id = conn.index[0]\n for var in ['m', 'p', 'h', 'v', 'x', 'T', 'Td_bp']:\n c.get_attr(var).design = hlp.convert_to_SI(\n var, df.loc[conn_id, var], df.loc[conn_id, var + '_unit'])\n c.vol.design = c.v.design / c.m.design\n for fluid in self.fluids:\n c.fluid.design[fluid] = df.loc[conn_id, fluid]\n except IndexError:\n # no matches in the connections of the network and the design files\n msg = (\n 'Could not find connection ' + c.label + ' in design case. '\n 'Please, make sure no connections have been modified or '\n 'components have been relabeled for your offdesign '\n 'calculation.')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n def init_offdesign(self):\n r\"\"\"\n Switch components and connections from design to offdesign mode.\n\n Note\n ----\n **components**\n\n All parameters stated in the component's attribute :code:`cp.design`\n will be unset and all parameters stated in the component's attribute\n :code:`cp.offdesign` will be set instead.\n\n Additionally, all component parameters specified as variables are\n unset and the values from design point are set.\n\n **connections**\n\n All parameters given in the connection's attribute :code:`c.design`\n will be unset and all parameters stated in the connections's attribute\n :code:`cp.offdesign` will be set instead. 
This does also affect\n referenced values!\n \"\"\"\n for c in self.conns['object']:\n if not c.local_design:\n # switch connections to offdesign mode\n for var in c.design:\n c.get_attr(var).val_set = False\n c.get_attr(var).ref_set = False\n\n for var in c.offdesign:\n c.get_attr(var).val_set = True\n c.get_attr(var).val_SI = c.get_attr(var).design\n\n c.new_design = False\n\n msg = 'Switched connections from design to offdesign.'\n logging.debug(msg)\n\n for cp in self.comps['object']:\n if not cp.local_design:\n # unset variables provided in .design attribute\n for var in cp.design:\n cp.get_attr(var).is_set = False\n\n switched = False\n msg = 'Set component attributes '\n\n for var in cp.offdesign:\n # set variables provided in .offdesign attribute\n data = cp.get_attr(var)\n data.is_set = True\n\n # take nominal values from design point\n if isinstance(data, dc_cp):\n cp.get_attr(var).val = cp.get_attr(var).design\n switched = True\n msg += var + ', '\n\n if switched:\n msg = (msg[:-2] + ' to design value at component ' +\n cp.label + '.')\n logging.debug(msg)\n\n # start component initialisation\n cp.comp_init(self)\n ct = cp.__class__.__name__\n for spec in self.specifications[ct].keys():\n if len(cp.get_attr(self.specifications['lookup'][spec])) > 0:\n self.specifications[ct][spec].loc[cp.label] = (\n cp.get_attr(self.specifications['lookup'][spec]))\n\n cp.new_design = False\n self.num_comp_vars += cp.num_vars\n self.num_comp_eq += cp.num_eq\n\n msg = 'Switched components from design to offdesign.'\n logging.debug(msg)\n\n # count bus equations and reindex bus dictionary\n for b in self.busses.values():\n self.busses[b.label] = b\n self.num_bus_eq += b.P.is_set * 1\n\n def init_fluids(self):\n r\"\"\"\n Initialise the fluid vector on every connection of the network.\n\n - Create fluid vector for every component as dict,\n index: nw.fluids,\n values: 0 if not set by user.\n - Create fluid_set vector with same logic,\n index: nw.fluids,\n values: False if not set by user.\n - If there are any combustion chambers in the network, calculate fluid\n vector starting from there.\n - Propagate fluid vector in direction of sources and targets.\n \"\"\"\n # stop fluid propagation for single fluid networks\n if len(self.fluids) == 1:\n return\n\n # fluid propagation from set values\n for c in self.conns['object']:\n if any(c.fluid.val_set.values()):\n c.target.propagate_fluid_to_target(c, c.target)\n c.source.propagate_fluid_to_source(c, c.source)\n\n # fluid starting value generation for components\n for cp in self.comps['object']:\n cp.initialise_fluids()\n\n msg = 'Fluid initialisation done.'\n logging.debug(msg)\n\n def init_properties(self):\n \"\"\"\n Initialise the fluid properties on every connection of the network.\n\n - Set generic starting values for mass flow, enthalpy and pressure if\n not user specified, read from :code:`ìnit_path` or available from\n previous calculation.\n - For generic starting values precalculate enthalpy value at points of\n given temperature, vapor mass fraction, temperature difference to\n boiling point or fluid state.\n \"\"\"\n if self.init_path is not None:\n df = self.init_read_connections(self.init_path)\n # improved starting values for referenced connections,\n # specified vapour content values, temperature values as well as\n # subccooling/overheating and state specification\n for c in self.conns['object']:\n if self.init_path is not None:\n conn = df.loc[\n df['source'].isin([c.source.label]) &\n df['target'].isin([c.target.label]) &\n 
df['source_id'].isin([c.source_id]) &\n df['target_id'].isin([c.target_id])]\n try:\n conn_id = conn.index[0]\n # overwrite SI-values with values from init_file,\n # except user specified values\n for prop in ['m', 'p', 'h']:\n data = c.get_attr(prop)\n data.val0 = df.loc[conn_id, prop]\n data.unit = df.loc[conn_id, prop + '_unit']\n\n for fluid in self.fluids:\n if not c.fluid.val_set[fluid]:\n c.fluid.val[fluid] = df.loc[conn_id, fluid]\n c.fluid.val0[fluid] = c.fluid.val[fluid]\n\n c.good_starting_values = True\n\n except IndexError:\n msg = (\n 'Could not find connection ' + c.label + ' in '\n 'connections.csv of init_path ' + self.init_path + '.')\n logging.debug(msg)\n\n if sum(c.fluid.val.values()) == 0:\n msg = (\n 'The starting value for the fluid composition of the '\n 'connection ' + c.label + ' is empty. This might lead to '\n 'issues in the initialisation and solving process as '\n 'fluid property functions can not be called. Make sure '\n 'you specified a fluid composition in all parts of the '\n 'network.')\n logging.warning(msg)\n\n for key in ['m', 'p', 'h']:\n if not c.good_starting_values:\n self.init_val0(c, key)\n if not c.get_attr(key).val_set:\n c.get_attr(key).val_SI = hlp.convert_to_SI(\n key, c.get_attr(key).val0, c.get_attr(key).unit)\n\n self.init_count_connections_parameters(c)\n\n for c in self.conns['object']:\n if not c.good_starting_values:\n for key in ['m', 'p', 'h', 'T']:\n if (c.get_attr(key).ref_set and\n not c.get_attr(key).val_set):\n c.get_attr(key).val_SI = (\n c.get_attr(key).ref.obj.get_attr(key).val_SI *\n c.get_attr(key).ref.factor +\n c.get_attr(key).ref.delta_SI)\n\n self.init_precalc_properties(c)\n\n # starting values for specified subcooling/overheating\n # and state specification. These should be recalculated even with\n # good starting values, for example, when one exchanges enthalpy\n # with boiling point temperature difference.\n if ((c.Td_bp.val_set or c.state.is_set) and\n not c.h.val_set):\n if ((c.Td_bp.val_SI > 0 and c.Td_bp.val_set) or\n (c.state.val == 'g' and c.state.is_set)):\n h = fp.h_mix_pQ(c.get_flow(), 1)\n if c.h.val_SI < h:\n c.h.val_SI = h * 1.001\n elif ((c.Td_bp.val_SI < 0 and c.Td_bp.val_set) or\n (c.state.val == 'l' and c.state.is_set)):\n h = fp.h_mix_pQ(c.get_flow(), 0)\n if c.h.val_SI > h:\n c.h.val_SI = h * 0.999\n\n msg = 'Generic fluid property specification complete.'\n logging.debug(msg)\n\n def init_count_connections_parameters(self, c):\n \"\"\"\n Count the number of parameters set on a connection.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection count parameters of.\n \"\"\"\n self.num_conn_eq += [\n c.m.val_set, c.p.val_set, c.h.val_set, c.T.val_set,\n c.x.val_set, c.v.val_set, c.Td_bp.val_set].count(True)\n self.num_conn_eq += [\n c.m.ref_set, c.p.ref_set, c.h.ref_set, c.T.ref_set].count(True)\n self.num_conn_eq += list(c.fluid.val_set.values()).count(True)\n self.num_conn_eq += c.fluid.balance * 1\n\n def init_precalc_properties(self, c):\n \"\"\"\n Precalculate enthalpy values for connections.\n\n Precalculation is performed only if temperature, vapor mass fraction,\n temperature difference to boiling point or phase is specified.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to precalculate values for.\n \"\"\"\n # starting values for specified vapour content or temperature\n if c.x.val_set and not c.h.val_set:\n try:\n c.h.val_SI = fp.h_mix_pQ(c.get_flow(), c.x.val_SI)\n except ValueError:\n pass\n\n if c.T.val_set and 
not c.h.val_set:\n try:\n c.h.val_SI = fp.h_mix_pT(c.get_flow(), c.T.val_SI)\n except ValueError:\n pass\n\n def init_val0(self, c, key):\n r\"\"\"\n Set starting values for fluid properties.\n\n The component classes provide generic starting values for their inlets\n and outlets.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to initialise.\n \"\"\"\n if np.isnan(c.get_attr(key).val0):\n # starting value for mass flow is 1 kg/s\n if key == 'm':\n c.get_attr(key).val0 = 1\n\n # generic starting values for pressure and enthalpy\n else:\n # retrieve starting values from component information\n val_s = c.source.initialise_source(c, key)\n val_t = c.target.initialise_target(c, key)\n\n if val_s == 0 and val_t == 0:\n if key == 'p':\n c.get_attr(key).val0 = 1e5\n elif key == 'h':\n c.get_attr(key).val0 = 1e6\n\n elif val_s == 0:\n c.get_attr(key).val0 = val_t\n elif val_t == 0:\n c.get_attr(key).val0 = val_s\n else:\n c.get_attr(key).val0 = (val_s + val_t) / 2\n\n # change value according to specified unit system\n c.get_attr(key).val0 = hlp.convert_from_SI(\n key, c.get_attr(key).val0, self.get_attr(key + '_unit'))\n\n @staticmethod\n def init_read_connections(base_path):\n r\"\"\"\n Read connection information from base_path.\n\n Parameters\n ----------\n base_path : str\n Path to network information.\n \"\"\"\n path = hlp.modify_path_os(base_path + '/connections.csv')\n df = pd.read_csv(path, index_col=0, delimiter=';', decimal='.')\n return df\n\n def solve(self, mode, init_path=None, design_path=None,\n max_iter=50, min_iter=4, init_only=False, init_previous=True,\n use_cuda=False, always_all_equations=True):\n r\"\"\"\n Solve the network.\n\n - Check network consistency.\n - Initialise calculation and preprocessing.\n - Perform actual calculation.\n - Postprocessing.\n\n Parameters\n ----------\n mode : str\n Choose from 'design' and 'offdesign'.\n\n init_path : str\n Path to the folder, where your network was saved to, e.g.\n saving to :code:`nw.save('myplant/tests')` would require loading\n from :code:`init_path='myplant/tests'`.\n\n design_path : str\n Path to the folder, where your network's design case was saved to,\n e.g. saving to :code:`nw.save('myplant/tests')` would require\n loading from :code:`design_path='myplant/tests'`.\n\n max_iter : int\n Maximum number of iterations before calculation stops, default: 50.\n\n min_iter : int\n Minimum number of iterations before calculation stops, default: 4.\n\n init_only : boolean\n Perform initialisation only, default: :code:`False`.\n\n init_previous : boolean\n Initialise the calculation with values from the previous\n calculation, default: :code:`True`.\n\n use_cuda : boolean\n Use cuda instead of numpy for matrix inversion, default:\n :code:`False`.\n\n always_all_equations : boolean\n Calculate all equations in every iteration. 
Disabling this flag,\n will increase calculation speed, especially for mixtures, default:\n :code:`True`.\n\n Note\n ----\n For more information on the solution process have a look at the online\n documentation at tespy.readthedocs.io in the section \"TESPy modules\".\n \"\"\"\n self.new_design = False\n if self.design_path == design_path and design_path is not None:\n for c in self.conns['object']:\n if c.new_design:\n self.new_design = True\n break\n if not self.new_design:\n for cp in self.comps['object']:\n if cp.new_design:\n self.new_design = True\n break\n\n else:\n self.new_design = True\n\n self.init_path = init_path\n self.design_path = design_path\n self.max_iter = max_iter\n self.min_iter = min_iter\n self.init_previous = init_previous\n self.iter = 0\n self.use_cuda = use_cuda\n self.always_all_equations = always_all_equations\n\n if self.use_cuda and cu is None:\n msg = ('Specifying use_cuda=True requires cupy to be installed on '\n 'your machine. Numpy will be used instead.')\n logging.warning(msg)\n self.use_cuda = False\n\n if mode != 'offdesign' and mode != 'design':\n msg = 'Mode must be \"design\" or \"offdesign\".'\n logging.error(msg)\n raise ValueError(msg)\n else:\n self.mode = mode\n\n msg = (\n 'Solver properties: mode=' + self.mode + ', init_path=' +\n str(self.init_path) + ', design_path=' + str(self.design_path) +\n ', max_iter=' + str(max_iter) + ', min_iter=' + str(min_iter) +\n ', init_only=' + str(init_only))\n logging.debug(msg)\n\n if not self.checked:\n self.check_network()\n\n msg = (\n 'Network properties: '\n 'number of components=' + str(len(self.comps)) +\n ', number of connections=' + str(len(self.conns.index)) +\n ', number of busses=' + str(len(self.busses)))\n logging.debug(msg)\n\n self.initialise()\n\n if init_only:\n return\n\n msg = 'Starting solver.'\n logging.info(msg)\n\n self.solve_determination()\n self.solve_loop()\n\n if self.lin_dep:\n msg = (\n 'Singularity in jacobian matrix, calculation aborted! Make '\n 'sure your network does not have any linear dependencies in '\n 'the parametrisation. Other reasons might be\\n-> given '\n 'temperature with given pressure in two phase region, try '\n 'setting enthalpy instead or provide accurate starting value '\n 'for pressure.\\n-> given logarithmic temperature differences '\n 'or kA-values for heat exchangers, \\n-> support better '\n 'starting values.\\n-> bad starting value for fuel mass flow '\n 'of combustion chamber, provide small (near to zero, but not '\n 'zero) starting value.')\n logging.error(msg)\n return\n\n self.postprocessing()\n fp.Memorise.del_memory(self.fluids)\n\n if not self.progress:\n msg = (\n 'The solver does not seem to make any progress, aborting '\n 'calculation. Residual value is '\n '{:.2e}'.format(norm(self.residual)) + '. 
This frequently '\n 'happens, if the solver pushes the fluid properties out of '\n 'their feasible range.')\n logging.warning(msg)\n return\n\n msg = 'Calculation complete.'\n logging.info(msg)\n\n def solve_loop(self):\n r\"\"\"Loop of the newton algorithm.\"\"\"\n # parameter definitions\n self.res = np.array([])\n self.residual = np.zeros([self.num_vars])\n self.increment = np.ones([self.num_vars])\n self.jacobian = np.zeros((self.num_vars, self.num_vars))\n\n self.start_time = time()\n self.progress = True\n\n if self.iterinfo:\n self.print_iterinfo_head()\n\n for self.iter in range(self.max_iter):\n\n self.increment_filter = np.absolute(self.increment) < err ** 2\n self.solve_control()\n self.res = np.append(self.res, norm(self.residual))\n\n if self.iterinfo:\n self.print_iterinfo_body()\n\n if ((self.iter >= self.min_iter and self.res[-1] < err ** 0.5) or\n self.lin_dep):\n break\n\n if self.iter > 40:\n if (all(self.res[(self.iter - 3):] >= self.res[-3] * 0.95) and\n self.res[-1] >= self.res[-2] * 0.95):\n self.progress = False\n break\n\n self.end_time = time()\n\n self.print_iterinfo_tail()\n\n if self.iter == self.max_iter - 1:\n msg = ('Reached maximum iteration count (' + str(self.max_iter) +\n '), calculation stopped. Residual value is '\n '{:.2e}'.format(norm(self.residual)))\n logging.warning(msg)\n\n def solve_determination(self):\n r\"\"\"Check, if the number of supplied parameters is sufficient.\"\"\"\n # number of variables per connection\n self.num_conn_vars = len(self.fluids) + 3\n\n # number of user defined functions\n self.num_ude_eq = len(self.user_defined_eq)\n\n for func in self.user_defined_eq.values():\n # remap connection objects\n func.conns = [\n self.conns.loc[c.label, 'object'] for c in func.conns]\n # remap jacobian\n func.jacobian = {\n c: np.zeros(self.num_conn_vars)\n for c in func.conns}\n\n # total number of variables\n self.num_vars = (\n self.num_conn_vars * len(self.conns.index) + self.num_comp_vars)\n\n msg = 'Number of connection equations: ' + str(self.num_conn_eq) + '.'\n logging.debug(msg)\n\n msg = 'Number of bus equations: ' + str(self.num_bus_eq) + '.'\n logging.debug(msg)\n\n msg = 'Number of component equations: ' + str(self.num_comp_eq) + '.'\n logging.debug(msg)\n\n msg = 'Number of user defined equations: ' + str(self.num_ude_eq) + '.'\n logging.debug(msg)\n\n msg = 'Total number of variables: ' + str(self.num_vars) + '.'\n logging.debug(msg)\n msg = 'Number of component variables: ' + str(self.num_comp_vars) + '.'\n logging.debug(msg)\n msg = ('Number of connection variables: ' +\n str(self.num_conn_vars * len(self.conns.index)) + '.')\n logging.debug(msg)\n\n n = (\n self.num_comp_eq + self.num_conn_eq +\n self.num_bus_eq + self.num_ude_eq)\n if n > self.num_vars:\n msg = ('You have provided too many parameters: ' +\n str(self.num_vars) + ' required, ' + str(n) +\n ' supplied. Aborting calculation!')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n elif n < self.num_vars:\n msg = ('You have not provided enough parameters: '\n + str(self.num_vars) + ' required, ' + str(n) +\n ' supplied. 
Aborting calculation!')\n logging.error(msg)\n raise hlp.TESPyNetworkError(msg)\n\n def print_iterinfo_head(self):\n \"\"\"Print head of convergence progress.\"\"\"\n if self.num_comp_vars == 0:\n # iterinfo printout without any custom variables\n msg = (\n 'iter\\t| residual | massflow | pressure | enthalpy | fluid\\n')\n msg += '-' * 8 + '+----------' * 4 + '+' + '-' * 9\n\n else:\n # iterinfo printout with custom variables in network\n msg = ('iter\\t| residual | massflow | pressure | enthalpy | '\n 'fluid | custom\\n')\n msg += '-' * 8 + '+----------' * 5 + '+' + '-' * 9\n\n print(msg)\n\n def print_iterinfo_body(self):\n \"\"\"Print convergence progress.\"\"\"\n vec = self.increment[0:-(self.num_comp_vars + 1)]\n msg = (str(self.iter + 1))\n\n if not self.lin_dep and not np.isnan(norm(self.residual)):\n msg += '\\t| ' + '{:.2e}'.format(norm(self.residual))\n msg += ' | ' + '{:.2e}'.format(norm(vec[0::self.num_conn_vars]))\n msg += ' | ' + '{:.2e}'.format(norm(vec[1::self.num_conn_vars]))\n msg += ' | ' + '{:.2e}'.format(norm(vec[2::self.num_conn_vars]))\n\n ls = []\n for f in range(len(self.fluids)):\n ls += vec[3 + f::self.num_conn_vars].tolist()\n\n msg += ' | ' + '{:.2e}'.format(norm(ls))\n\n if self.num_comp_vars > 0:\n msg += ' | ' + '{:.2e}'.format(norm(\n self.increment[-self.num_comp_vars:]))\n\n else:\n if np.isnan(norm(self.residual)):\n msg += '\\t| nan'\n else:\n msg += '\\t| ' + '{:.2e}'.format(norm(self.residual))\n msg += ' | nan' * 4\n if self.num_comp_vars > 0:\n msg += ' | nan'\n\n print(msg)\n\n def print_iterinfo_tail(self):\n \"\"\"Print tail of convergence progress.\"\"\"\n msg = (\n 'Total iterations: ' + str(self.iter + 1) + ', Calculation '\n 'time: ' + str(round(self.end_time - self.start_time, 1)) +\n ' s, Iterations per second: ')\n ips = 'inf'\n if self.end_time != self.start_time:\n ips = str(round(\n (self.iter + 1) / (self.end_time - self.start_time), 2))\n msg += ips\n logging.debug(msg)\n\n if self.iterinfo:\n if self.num_comp_vars == 0:\n print('-' * 8 + '+----------' * 4 + '+' + '-' * 9)\n else:\n print('-' * 8 + '+----------' * 5 + '+' + '-' * 9)\n print(msg)\n\n def matrix_inversion(self):\n \"\"\"Invert matrix of derivatives and caluclate increment.\"\"\"\n self.lin_dep = True\n try:\n # Let the matrix inversion be computed by the GPU if use_cuda in\n # global_vars.py is true.\n if self.use_cuda:\n self.increment = cu.asnumpy(cu.dot(\n cu.linalg.inv(cu.asarray(self.jacobian)),\n -cu.asarray(self.residual)))\n else:\n self.increment = np.linalg.inv(\n self.jacobian).dot(-self.residual)\n self.lin_dep = False\n except np.linalg.linalg.LinAlgError:\n self.increment = self.residual * 0\n\n def solve_control(self):\n r\"\"\"\n Control iteration step of the newton algorithm.\n\n - Calculate the residual value for each equation\n - Calculate the jacobian matrix\n - Calculate new values for variables\n - Restrict fluid properties to value ranges\n - Check component parameters for consistency\n \"\"\"\n self.solve_components()\n self.solve_busses()\n self.solve_connections()\n self.solve_user_defined_eq()\n self.matrix_inversion()\n\n # check for linear dependency\n if self.lin_dep:\n return\n\n # add the increment\n i = 0\n for c in self.conns['object']:\n # mass flow, pressure and enthalpy\n if not c.m.val_set:\n c.m.val_SI += self.increment[i * (self.num_conn_vars)]\n if not c.p.val_set:\n # this prevents negative pressures\n relax = max(1, -self.increment[i * (self.num_conn_vars) + 1] /\n (0.5 * c.p.val_SI))\n c.p.val_SI += self.increment[\n i * 
(self.num_conn_vars) + 1] / relax\n if not c.h.val_set:\n c.h.val_SI += self.increment[i * (self.num_conn_vars) + 2]\n\n # fluid vector (only if number of fluids is greater than 1)\n if len(self.fluids) > 1:\n j = 0\n for fluid in self.fluids:\n # add increment\n if not c.fluid.val_set[fluid]:\n c.fluid.val[fluid] += (\n self.increment[\n i * (self.num_conn_vars) + 3 + j])\n\n # keep mass fractions within [0, 1]\n if c.fluid.val[fluid] < err:\n c.fluid.val[fluid] = 0\n elif c.fluid.val[fluid] > 1 - err:\n c.fluid.val[fluid] = 1\n\n j += 1\n\n # check the fluid properties for physical ranges\n self.solve_check_props(c)\n i += 1\n\n # increment for the custom variables\n if self.num_comp_vars > 0:\n sum_c_var = 0\n for cp in self.comps['object']:\n for var in cp.vars.keys():\n pos = var.var_pos\n\n # add increment\n var.val += self.increment[\n self.num_conn_vars * len(self.conns) + sum_c_var + pos]\n\n # keep value within specified value range\n if var.val < var.min_val:\n var.val = var.min_val\n elif var.val > var.max_val:\n var.val = var.max_val\n\n sum_c_var += cp.num_vars\n\n # second property check for first three iterations without an init_file\n if self.iter < 3:\n for cp in self.comps['object']:\n cp.convergence_check()\n\n for c in self.conns['object']:\n self.solve_check_props(c)\n\n def property_range_message(self, c, prop):\n r\"\"\"\n Return debugging message for fluid property range adjustments.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to check fluid properties.\n\n prop : str\n Fluid property.\n\n Returns\n -------\n msg : str\n Debugging message.\n \"\"\"\n msg = (\n fpd[prop]['text'][0].upper() + fpd[prop]['text'][1:] +\n ' out of fluid property range at connection ' + c.label +\n ' adjusting value to ' + str(c.get_attr(prop).val_SI) +\n ' ' + fpd[prop]['SI_unit'] + '.')\n return msg\n\n def solve_check_props(self, c):\n r\"\"\"\n Check for invalid fluid property values.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to check fluid properties.\n \"\"\"\n fl = hlp.single_fluid(c.fluid.val)\n\n if fl is not None:\n # pressure\n if c.p.val_SI < fp.Memorise.value_range[fl][0] and not c.p.val_set:\n c.p.val_SI = fp.Memorise.value_range[fl][0]\n logging.debug(self.property_range_message(c, 'p'))\n elif (c.p.val_SI > fp.Memorise.value_range[fl][1] and\n not c.p.val_set):\n c.p.val_SI = fp.Memorise.value_range[fl][1]\n logging.debug(self.property_range_message(c, 'p'))\n\n # enthalpy\n try:\n hmin = fp.h_pT(\n c.p.val_SI, fp.Memorise.value_range[fl][2] * 1.001, fl)\n except ValueError:\n f = 1.05\n hmin = fp.h_pT(\n c.p.val_SI, fp.Memorise.value_range[fl][2] * f, fl)\n\n T = fp.Memorise.value_range[fl][3]\n while True:\n try:\n hmax = fp.h_pT(c.p.val_SI, T, fl)\n break\n except ValueError as e:\n T *= 0.99\n if T < fp.Memorise.value_range[fl][2]:\n raise ValueError(e)\n\n if c.h.val_SI < hmin and not c.h.val_set:\n if hmin < 0:\n c.h.val_SI = hmin * 0.9999\n else:\n c.h.val_SI = hmin * 1.0001\n logging.debug(self.property_range_message(c, 'h'))\n\n elif c.h.val_SI > hmax and not c.h.val_set:\n c.h.val_SI = hmax * 0.9999\n logging.debug(self.property_range_message(c, 'h'))\n\n if ((c.Td_bp.val_set or c.state.is_set) and\n not c.h.val_set and self.iter < 3):\n if (c.Td_bp.val_SI > 0 or\n (c.state.val == 'g' and c.state.is_set)):\n h = fp.h_mix_pQ(c.get_flow(), 1)\n if c.h.val_SI < h:\n c.h.val_SI = h * 1.01\n logging.debug(self.property_range_message(c, 'h'))\n elif (c.Td_bp.val_SI < 0 or\n 
(c.state.val == 'l' and c.state.is_set)):\n h = fp.h_mix_pQ(c.get_flow(), 0)\n if c.h.val_SI > h:\n c.h.val_SI = h * 0.99\n logging.debug(self.property_range_message(c, 'h'))\n\n elif self.iter < 4 and not c.good_starting_values:\n # pressure\n if c.p.val_SI <= self.p_range_SI[0] and not c.p.val_set:\n c.p.val_SI = self.p_range_SI[0]\n logging.debug(self.property_range_message(c, 'p'))\n\n elif c.p.val_SI >= self.p_range_SI[1] and not c.p.val_set:\n c.p.val_SI = self.p_range_SI[1]\n logging.debug(self.property_range_message(c, 'p'))\n\n # enthalpy\n if c.h.val_SI < self.h_range_SI[0] and not c.h.val_set:\n c.h.val_SI = self.h_range_SI[0]\n logging.debug(self.property_range_message(c, 'h'))\n\n elif c.h.val_SI > self.h_range_SI[1] and not c.h.val_set:\n c.h.val_SI = self.h_range_SI[1]\n logging.debug(self.property_range_message(c, 'h'))\n\n # temperature\n if c.T.val_set and not c.h.val_set:\n self.solve_check_temperature(c)\n\n # mass flow\n if c.m.val_SI <= self.m_range_SI[0] and not c.m.val_set:\n c.m.val_SI = self.m_range_SI[0]\n logging.debug(self.property_range_message(c, 'm'))\n\n elif c.m.val_SI >= self.m_range_SI[1] and not c.m.val_set:\n c.m.val_SI = self.m_range_SI[1]\n logging.debug(self.property_range_message(c, 'm'))\n\n def solve_check_temperature(self, c):\n r\"\"\"\n Check if temperature is within user specified limits.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to check fluid properties.\n \"\"\"\n flow = c.get_flow()\n Tmin = max(\n [fp.Memorise.value_range[f][2] for\n f in flow[3].keys() if flow[3][f] > err]\n ) + 100\n Tmax = min(\n [fp.Memorise.value_range[f][3] for\n f in flow[3].keys() if flow[3][f] > err]\n ) - 100\n hmin = fp.h_mix_pT(flow, Tmin)\n hmax = fp.h_mix_pT(flow, Tmax)\n\n if c.h.val_SI < hmin:\n c.h.val_SI = hmin\n logging.debug(self.property_range_message(c, 'h'))\n\n if c.h.val_SI > hmax:\n c.h.val_SI = hmax\n logging.debug(self.property_range_message(c, 'h'))\n\n def solve_components(self):\n r\"\"\"\n Calculate the residual and derivatives of component equations.\n\n - Iterate through components in network to get residuals and\n derivatives.\n - Place residual values in residual value vector of the network.\n - Place partial derivatives in jacobian matrix of the network.\n \"\"\"\n # fetch component equation residuals and component partial derivatives\n sum_eq = 0\n sum_c_var = 0\n for cp in self.comps['object']:\n\n indices = []\n for c in cp.conn_loc:\n start = c * self.num_conn_vars\n end = (c + 1) * self.num_conn_vars\n indices += [np.arange(start, end)]\n\n cp.solve(self.increment_filter[np.array(indices)])\n\n self.residual[sum_eq:sum_eq + cp.num_eq] = cp.residual\n deriv = cp.jacobian\n\n if deriv is not None:\n i = 0\n # place derivatives in jacobian matrix\n for loc in cp.conn_loc:\n coll_s = loc * self.num_conn_vars\n coll_e = (loc + 1) * self.num_conn_vars\n self.jacobian[\n sum_eq:sum_eq + cp.num_eq, coll_s:coll_e] = deriv[:, i]\n i += 1\n\n # derivatives for custom variables\n for j in range(cp.num_vars):\n coll = self.num_vars - self.num_comp_vars + sum_c_var\n self.jacobian[sum_eq:sum_eq + cp.num_eq, coll] = (\n deriv[:, i + j, :1].transpose()[0])\n sum_c_var += 1\n\n sum_eq += cp.num_eq\n cp.it += 1\n\n def solve_user_defined_eq(self):\n \"\"\"\n Calculate the residual and jacobian of user defined equations.\n\n - Iterate through user defined functions and calculate residual value\n and corresponding jacobian.\n - Place residual values in residual value vector of the network.\n - Place 
partial derivatives regarding connection parameters in jacobian\n matrix of the network.\n \"\"\"\n row = self.num_comp_eq + self.num_conn_eq + self.num_bus_eq\n for ude in self.user_defined_eq.values():\n self.residual[row] = ude.func(ude)\n jacobian = ude.deriv(ude)\n for c, derivative in jacobian.items():\n col = c.conn_loc * self.num_conn_vars\n self.jacobian[row, col:col + self.num_conn_vars] = derivative\n row += 1\n\n def solve_connections(self):\n r\"\"\"\n Calculate the residual and derivatives of connection equations.\n\n - Iterate through connections in network to get residuals and\n derivatives.\n - Place residual values in residual value vector of the network.\n - Place partial derivatives in jacobian matrix of the network.\n\n Note\n ----\n **Equations**\n\n **mass flow, pressure and enthalpy**\n\n .. math::\n val = 0\n\n **temperatures**\n\n .. math::\n val = T_{j} - T \\left( p_{j}, h_{j}, fluid_{j} \\right)\n\n **volumetric flow**\n\n .. math::\n val = \\dot{V}_{j} - v \\left( p_{j}, h_{j} \\right) \\cdot \\dot{m}_j\n\n **superheating or subcooling** *Works with pure fluids only!*\n\n .. math::\n val = T_{j} - td_{bp} - T_{bp}\\left( p_{j}, fluid_{j} \\right)\n\n \\text{td: temperature difference, bp: boiling point}\n\n **vapour mass fraction** *Works with pure fluids only!*\n\n .. math::\n val = h_{j} - h \\left( p_{j}, x_{j}, fluid_{j} \\right)\n\n **Referenced values**\n\n **mass flow, pressure and enthalpy**\n\n .. math::\n val = x_{j} - x_{j,ref} \\cdot a + b\n\n **temperatures**\n\n .. math::\n val = T \\left( p_{j}, h_{j}, fluid_{j} \\right) -\n T \\left( p_{j}, h_{j}, fluid_{j} \\right) \\cdot a + b\n\n **Derivatives**\n\n **mass flow, pressure and enthalpy**\n\n .. math::\n\n J\\left(\\frac{\\partial f_{i}}{\\partial m_{j}}\\right) = 1\\\\\n \\text{for equation i, connection j}\\\\\n \\text{pressure and enthalpy analogously}\n\n **temperatures**\n\n .. math::\n\n J\\left(\\frac{\\partial f_{i}}{\\partial p_{j}}\\right) =\n -\\frac{\\partial T_{j}}{\\partial p_{j}}\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial h_{j}}\\right) =\n -\\frac{\\partial T_{j}}{\\partial h_{j}}\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial fluid_{j,k}}\\right) =\n - \\frac{\\partial T_{j}}{\\partial fluid_{j,k}}\n\n \\forall k \\in \\text{fluid components}\\\\\n \\text{for equation i, connection j}\n\n **volumetric flow**\n\n .. math::\n\n J\\left(\\frac{\\partial f_{i}}{\\partial m_{j}}\\right) =\n -v \\left( p_{j}, h_{j} \\right)\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial p_{j}}\\right) =\n -\\frac{\\partial v_{j}}{\\partial p_{j}} \\cdot \\dot{m}_j\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial h_{j}}\\right) =\n -\\frac{\\partial v_{j}}{\\partial h_{j}} \\cdot \\dot{m}_j\\\\\n\n \\forall k \\in \\text{fluid components}\\\\\n \\text{for equation i, connection j}\n\n **superheating or subcooling** *Works with pure fluids only!*\n\n .. math::\n\n J\\left(\\frac{\\partial f_{i}}{\\partial p_{j}}\\right) =\n \\frac{\\partial T \\left( p_{j}, h_{j}, fluid_{j} \\right)}\n {\\partial p_{j}} -\n \\frac{\\partial T_{bp} \\left( p_{j}, fluid_{j} \\right)}\n {\\partial p_{j}} \\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial h_{j}}\\right) =\n \\frac{\\partial T \\left( p_{j}, h_{j}, fluid_{j} \\right)}\n {\\partial h_{j}}\\\\\n\n \\text{for equation i, connection j}\\\\\n \\text{td: temperature difference, bp: boiling point}\n\n **vapour mass fraction** *Works with pure fluids only!*\n\n .. 
math::\n\n J\\left(\\frac{\\partial f_{i}}{\\partial p_{j}}\\right) =\n -\\frac{\\partial h \\left( p_{j}, x_{j}, fluid_{j} \\right)}\n {\\partial p_{j}}\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial h_{j}}\\right) = 1\\\\\n \\text{for equation i, connection j, x: vapour mass fraction}\n\n **Referenced values**\n\n **mass flow, pressure and enthalpy**\n\n .. math::\n J\\left(\\frac{\\partial f_{i}}{\\partial m_{j}}\\right) = 1\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial m_{j,ref}}\\right) = - a\\\\\n \\text{for equation i, connection j}\\\\\n \\text{pressure and enthalpy analogously}\n\n **temperatures**\n\n .. math::\n J\\left(\\frac{\\partial f_{i}}{\\partial p_{j}}\\right) =\n \\frac{dT_{j}}{dp_{j}}\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial h_{j}}\\right) =\n \\frac{dT_{j}}{dh_{j}}\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial fluid_{j,k}}\\right) =\n \\frac{dT_{j}}{dfluid_{j,k}}\n \\; , \\forall k \\in \\text{fluid components}\\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial p_{j,ref}}\\right) =\n \\frac{dT_{j,ref}}{dp_{j,ref}} \\cdot a \\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial h_{j,ref}}\\right) =\n \\frac{dT_{j,ref}}{dh_{j,ref}} \\cdot a \\\\\n J\\left(\\frac{\\partial f_{i}}{\\partial fluid_{j,k,ref}}\\right) =\n \\frac{dT_{j}}{dfluid_{j,k,ref}} \\cdot a\n \\; , \\forall k \\in \\text{fluid components}\\\\\n \\text{for equation i, connection j}\n \"\"\"\n k = self.num_comp_eq\n primary_vars = {'m': 0, 'p': 1, 'h': 2}\n for c in self.conns['object']:\n flow = c.get_flow()\n col = c.conn_loc * self.num_conn_vars\n\n # referenced mass flow, pressure or enthalpy\n for var, pos in primary_vars.items():\n if c.get_attr(var).ref_set:\n ref = c.get_attr(var).ref\n ref_col = ref.obj.conn_loc * self.num_conn_vars\n self.residual[k] = (\n c.get_attr(var).val_SI - (\n ref.obj.get_attr(var).val_SI * ref.factor +\n ref.delta_SI))\n self.jacobian[k, col + pos] = 1\n self.jacobian[k, ref_col + pos] = -c.get_attr(var).ref.factor\n k += 1\n\n # temperature\n if c.T.val_set:\n self.residual[k] = fp.T_mix_ph(\n flow, T0=c.T.val_SI) - c.T.val_SI\n\n self.jacobian[k, col + 1] = (\n fp.dT_mix_dph(flow, T0=c.T.val_SI))\n self.jacobian[k, col + 2] = (\n fp.dT_mix_pdh(flow, T0=c.T.val_SI))\n if len(self.fluids) != 1:\n col_s = c.conn_loc * self.num_conn_vars + 3\n col_e = (c.conn_loc + 1) * self.num_conn_vars\n if not all(self.increment_filter[col_s:col_e]):\n self.jacobian[k, col_s:col_e] = fp.dT_mix_ph_dfluid(\n flow, T0=c.T.val_SI)\n k += 1\n\n # referenced temperature\n if c.T.ref_set:\n ref = c.T.ref\n flow_ref = ref.obj.get_flow()\n ref_col = ref.obj.conn_loc * self.num_conn_vars\n self.residual[k] = fp.T_mix_ph(flow, T0=c.T.val_SI) - (\n fp.T_mix_ph(flow_ref, T0=ref.obj.T.val_SI) *\n ref.factor + ref.delta_SI)\n\n self.jacobian[k, col + 1] = (\n fp.dT_mix_dph(flow, T0=c.T.val_SI))\n self.jacobian[k, col + 2] = (\n fp.dT_mix_pdh(flow, T0=c.T.val_SI))\n\n self.jacobian[k, ref_col + 1] = -(\n fp.dT_mix_dph(flow_ref, T0=ref.obj.T.val_SI) * ref.factor)\n self.jacobian[k, ref_col + 2] = -(\n fp.dT_mix_pdh(flow_ref, T0=ref.obj.T.val_SI) * ref.factor)\n\n # dT / dFluid\n if len(self.fluids) != 1:\n col_s = c.conn_loc * self.num_conn_vars + 3\n col_e = (c.conn_loc + 1) * self.num_conn_vars\n ref_col_s = ref.obj.conn_loc * self.num_conn_vars + 3\n ref_col_e = (ref.obj.conn_loc + 1) * self.num_conn_vars\n if not all(self.increment_filter[col_s:col_e]):\n self.jacobian[k, col_s:col_e] = (\n fp.dT_mix_ph_dfluid(flow, T0=c.T.val_SI))\n if not all(self.increment_filter[ref_col_s:ref_col_e]):\n 
self.jacobian[k, ref_col_s:ref_col_e] = -np.array([\n fp.dT_mix_ph_dfluid(\n flow_ref, T0=ref.obj.T.val_SI)])\n k += 1\n\n # saturated steam fraction\n if c.x.val_set:\n if (np.absolute(self.residual[k]) > err ** 2 or\n self.iter % 2 == 0 or self.always_all_equations):\n self.residual[k] = c.h.val_SI - (\n fp.h_mix_pQ(flow, c.x.val_SI))\n if not self.increment_filter[col + 1]:\n self.jacobian[k, col + 1] = -(\n fp.dh_mix_dpQ(flow, c.x.val_SI))\n self.jacobian[k, col + 2] = 1\n k += 1\n\n # volumetric flow\n if c.v.val_set:\n if (np.absolute(self.residual[k]) > err ** 2 or\n self.iter % 2 == 0 or self.always_all_equations):\n self.residual[k] = (\n fp.v_mix_ph(flow, T0=c.T.val_SI) * c.m.val_SI -\n c.v.val_SI)\n self.jacobian[k, col] = fp.v_mix_ph(flow, T0=c.T.val_SI)\n self.jacobian[k, col + 1] = (\n fp.dv_mix_dph(flow, T0=c.T.val_SI) * c.m.val_SI)\n self.jacobian[k, col + 2] = (\n fp.dv_mix_pdh(flow, T0=c.T.val_SI) * c.m.val_SI)\n k += 1\n\n # temperature difference to boiling point\n if c.Td_bp.val_set:\n if (np.absolute(self.residual[k]) > err ** 2 or\n self.iter % 2 == 0 or self.always_all_equations):\n self.residual[k] = (\n fp.T_mix_ph(flow, T0=c.T.val_SI) - c.Td_bp.val_SI -\n fp.T_bp_p(flow))\n if not self.increment_filter[col + 1]:\n self.jacobian[k, col + 1] = (\n fp.dT_mix_dph(flow, T0=c.T.val_SI) - fp.dT_bp_dp(flow))\n if not self.increment_filter[col + 2]:\n self.jacobian[k, col + 2] = fp.dT_mix_pdh(\n flow, T0=c.T.val_SI)\n k += 1\n\n # fluid composition balance\n if c.fluid.balance:\n j = 0\n res = 1\n for f in self.fluids:\n res -= c.fluid.val[f]\n self.jacobian[k, c.conn_loc + 3 + j] = -1\n j += 1\n\n self.residual[k] = res\n k += 1\n\n # equations and derivatives for specified primary variables are static\n if self.iter == 0:\n for c in self.conns['object']:\n col = c.conn_loc * self.num_conn_vars\n\n # specified mass flow, pressure and enthalpy\n for var, pos in primary_vars.items():\n if c.get_attr(var).val_set:\n self.residual[k] = 0\n self.jacobian[k, col + pos] = 1\n k += 1\n\n j = 0\n # specified fluid mass fraction\n for f in self.fluids:\n if c.fluid.val_set[f]:\n self.jacobian[k, col + 3 + j] = 1\n k += 1\n j += 1\n\n def solve_busses(self):\n r\"\"\"\n Calculate the equations and the partial derivatives for the busses.\n\n - Iterate through busses in network to get residuals and derivatives.\n - Place residual values in residual value vector of the network.\n - Place partial derivatives in jacobian matrix of the network.\n \"\"\"\n row = self.num_comp_eq + self.num_conn_eq\n for bus in self.busses.values():\n if bus.P.is_set:\n P_res = 0\n for cp in bus.comps.index:\n\n P_res -= cp.calc_bus_value(bus)\n deriv = -cp.bus_deriv(bus)\n\n j = 0\n for loc in cp.conn_loc:\n # start collumn index\n coll_s = loc * self.num_conn_vars\n # end collumn index\n coll_e = (loc + 1) * self.num_conn_vars\n self.jacobian[row, coll_s:coll_e] = deriv[:, j]\n j += 1\n\n self.residual[row] = bus.P.val + P_res\n row += 1\n\n def postprocessing(self):\n r\"\"\"Calculate connection, bus and component parameters.\"\"\"\n\n self.process_connections()\n self.process_components()\n self.process_busses()\n\n msg = 'Postprocessing complete.'\n logging.info(msg)\n\n def process_connections(self):\n \"\"\"Process the Connection results.\"\"\"\n for c in self.conns['object']:\n flow = c.get_flow()\n c.good_starting_values = True\n c.T.val_SI = fp.T_mix_ph(flow, T0=c.T.val_SI)\n fluid = hlp.single_fluid(c.fluid.val)\n if (fluid is None and\n abs(\n fp.h_mix_pT(flow, c.T.val_SI) - c.h.val_SI\n ) > 
err ** .5):\n c.T.val_SI = np.nan\n c.vol.val_SI = np.nan\n c.v.val_SI = np.nan\n c.s.val_SI = np.nan\n msg = (\n 'Could not find a feasible value for mixture temperature '\n 'at connection ' + c.label + '. The values for '\n 'temperature, specific volume, volumetric flow and '\n 'entropy are set to nan.')\n logging.error(msg)\n\n else:\n c.vol.val_SI = fp.v_mix_ph(flow, T0=c.T.val_SI)\n c.v.val_SI = c.vol.val_SI * c.m.val_SI\n c.s.val_SI = fp.s_mix_ph(flow, T0=c.T.val_SI)\n if fluid is not None:\n if not c.x.val_set:\n c.x.val_SI = fp.Q_ph(c.p.val_SI, c.h.val_SI, fluid)\n if not c.Td_bp.val_set:\n c.Td_bp.val_SI = np.nan\n\n for prop in fpd.keys():\n c.get_attr(prop).val = hlp.convert_from_SI(\n prop, c.get_attr(prop).val_SI, c.get_attr(prop).unit)\n\n c.m.val0 = c.m.val\n c.p.val0 = c.p.val\n c.h.val0 = c.h.val\n c.fluid.val0 = c.fluid.val.copy()\n\n self.results['Connection'].loc[c.label] = (\n [c.m.val, c.p.val, c.h.val, c.T.val, c.v.val, c.vol.val,\n c.s.val, c.x.val, c.Td_bp.val] +\n [f for f in c.fluid.val.values()])\n\n def process_components(self):\n \"\"\"Process the component results.\"\"\"\n # components\n for cp in self.comps['object']:\n cp.calc_parameters()\n cp.check_parameter_bounds()\n\n key = cp.__class__.__name__\n for param in self.results[key].columns:\n p = cp.get_attr(param)\n if (p.func is not None or (p.func is None and p.is_set) or\n p.is_result):\n self.results[key].loc[cp.label, param] = p.val\n else:\n self.results[key].loc[cp.label, param] = np.nan\n\n def process_busses(self):\n \"\"\"Process the bus results.\"\"\"\n # busses\n for b in self.busses.values():\n for cp in b.comps.index:\n # get components bus func value\n bus_val = cp.calc_bus_value(b)\n eff = cp.calc_bus_efficiency(b)\n cmp_val = cp.bus_func(b.comps.loc[cp])\n\n b.comps.loc[cp, 'char'].get_domain_errors(\n cp.calc_bus_expr(b), cp.label)\n\n # save as reference value\n if self.mode == 'design':\n if b.comps.loc[cp, 'base'] == 'component':\n design_value = cmp_val\n else:\n design_value = bus_val\n\n b.comps.loc[cp, 'P_ref'] = design_value\n\n else:\n design_value = b.comps.loc[cp, 'P_ref']\n\n self.results[b.label].loc[cp.label] = (\n [cmp_val, bus_val, eff, design_value])\n\n b.P.val = self.results[b.label]['bus value'].sum()\n\n# %% printing and plotting\n\n def print_results(self, colored=True, colors={}):\n r\"\"\"Print the calculations results to prompt.\"\"\"\n # Define colors for highlighting values in result table\n coloring = {\n 'end': '\\033[0m',\n 'set': '\\033[94m',\n 'err': '\\033[31m',\n 'var': '\\033[32m'\n }\n coloring.update(colors)\n\n if not hasattr(self, 'results'):\n msg = (\n 'It is not possible to print the results of a network, that '\n 'has never been solved successfully. 
Results DataFrames are '\n 'only available after a full simulation run is performed.')\n raise hlp.TESPyNetworkError(msg)\n\n for cp in self.comps['comp_type'].unique():\n df = self.results[cp].copy()\n\n # are there any parameters to print?\n if df.size > 0:\n cols = df.columns\n if len(cols) > 0:\n for col in cols:\n df[col] = df.apply(\n self.print_components, axis=1,\n args=(col, colored, coloring))\n\n df.dropna(how='all', inplace=True)\n\n if len(df) > 0:\n # printout with tabulate\n print('##### RESULTS (' + cp + ') #####')\n print(tabulate(\n df, headers='keys', tablefmt='psql',\n floatfmt='.2e'))\n\n # connection properties\n df = self.results['Connection'].loc[:, ['m', 'p', 'h', 'T']]\n for c in df.index:\n if not self.get_conn(c).printout:\n df.drop([c], axis=0, inplace=True)\n\n elif colored:\n conn = self.get_conn(c)\n for col in df.columns:\n if conn.get_attr(col).val_set:\n df.loc[c, col] = (\n coloring['set'] + str(conn.get_attr(col).val) +\n coloring['end'])\n\n if len(df) > 0:\n print('##### RESULTS (Connection) #####')\n print(\n tabulate(df, headers='keys', tablefmt='psql', floatfmt='.3e'))\n\n for b in self.busses.values():\n if b.printout:\n df = self.results[b.label].loc[\n :, ['component value', 'bus value', 'efficiency']]\n df.loc['total'] = df.sum()\n df.loc['total', 'efficiency'] = np.nan\n if colored and b.P.is_set:\n df.loc['total', 'bus value'] = (\n coloring['set'] + str(df.loc['total', 'bus value']) +\n coloring['end'])\n print('##### RESULTS (Bus: ' + b.label + ') #####')\n print(tabulate(df, headers='keys', tablefmt='psql',\n floatfmt='.3e'))\n\n def print_components(self, c, *args):\n \"\"\"\n Get the print values for the component data.\n\n Parameters\n ----------\n c : pandas.core.series.Series\n Series containing the component data.\n\n param : str\n Component parameter to print.\n\n colored : booloean\n Color the printout.\n\n coloring : dict\n Coloring information for colored printout.\n\n Returns\n ----------\n value : str\n String representation of the value to print.\n \"\"\"\n param, colored, coloring = args\n comp = self.get_comp(c.name)\n if comp.printout:\n # select parameter from results DataFrame\n val = c[param]\n if not colored:\n return str(val)\n # else part\n if (val < comp.get_attr(param).min_val - err or\n val > comp.get_attr(param).max_val + err):\n return coloring['err'] + ' ' + str(val) + ' ' + coloring['end']\n if comp.get_attr(args[0]).is_var:\n return coloring['var'] + ' ' + str(val) + ' ' + coloring['end']\n if comp.get_attr(args[0]).is_set:\n return coloring['set'] + ' ' + str(val) + ' ' + coloring['end']\n return str(val)\n else:\n return np.nan\n\n# %% saving\n\n def save(self, path, **kwargs):\n r\"\"\"\n Save the results to results files.\n\n Parameters\n ----------\n filename : str\n Path for the results.\n\n Note\n ----\n Results will be saved to path. 
The results contain:\n\n - network.json (network information)\n - connections.csv (connection information)\n - folder components containing .csv files for busses and\n characteristics as well as .csv files for all types of components\n within your network.\n \"\"\"\n if path[-1] != '/' and path[-1] != '\\\\':\n path += '/'\n path = hlp.modify_path_os(path)\n\n logging.debug('Saving network to path ' + path + '.')\n # creat path, if non existent\n if not os.path.exists(path):\n os.makedirs(path)\n\n # create path for component folder if non existent\n path_comps = hlp.modify_path_os(path + 'components/')\n if not os.path.exists(path_comps):\n os.makedirs(path_comps)\n\n # save all network information\n self.save_network(path + 'network.json')\n self.save_connections(path + 'connections.csv')\n self.save_components(path_comps)\n self.save_busses(path_comps + 'bus.csv')\n self.save_characteristics(path_comps)\n\n def save_network(self, fn):\n r\"\"\"\n Save basic network configuration.\n\n Parameters\n ----------\n fn : str\n Path/filename for the network configuration file.\n \"\"\"\n data = {}\n data['m_unit'] = self.m_unit\n data['m_range'] = list(self.m_range)\n data['p_unit'] = self.p_unit\n data['p_range'] = list(self.p_range)\n data['h_unit'] = self.h_unit\n data['h_range'] = list(self.h_range)\n data['T_unit'] = self.T_unit\n data['x_unit'] = self.x_unit\n data['v_unit'] = self.v_unit\n data['s_unit'] = self.s_unit\n data['fluids'] = self.fluids_backends\n\n with open(fn, 'w') as f:\n f.write(json.dumps(data, indent=4))\n\n logging.debug('Network information saved to ' + fn + '.')\n\n def save_connections(self, fn):\n r\"\"\"\n Save the connection properties.\n\n - Uses connections object id as row identifier and saves\n\n - connections source and target as well as\n - properties with references and\n - fluid vector (including user specification if structure is True).\n\n - Connections source and target are identified by its labels.\n\n Parameters\n ----------\n fn : str\n Path/filename for the file.\n \"\"\"\n f = Network.get_props\n df = self.conns.copy()\n df.set_index('object', inplace=True)\n # connection id\n df['id'] = df.apply(Network.get_id, axis=1)\n cols = df.columns.tolist()\n df = df[cols[-1:] + cols[:-1]]\n\n # general connection parameters\n # source\n df['source'] = df.apply(f, axis=1, args=('source', 'label'))\n # target\n df['target'] = df.apply(f, axis=1, args=('target', 'label'))\n\n # design and offdesign properties\n cols = ['label', 'design', 'offdesign', 'design_path', 'local_design',\n 'local_offdesign', 'label']\n\n for key in cols:\n df[key] = df.apply(f, axis=1, args=(key,))\n\n # fluid properties\n cols = ['m', 'p', 'h', 'T', 'x', 'v', 'Td_bp']\n for key in cols:\n # values and units\n df[key] = df.apply(f, axis=1, args=(key, 'val'))\n df[key + '_unit'] = df.apply(f, axis=1, args=(key, 'unit'))\n df[key + '0'] = df.apply(f, axis=1, args=(key, 'val0'))\n df[key + '_set'] = df.apply(f, axis=1, args=(key, 'val_set'))\n df[key + '_ref'] = df.apply(\n f, axis=1, args=(key, 'ref', 'obj',)).astype(str)\n df[key + '_ref'] = df[key + '_ref'].str.extract(\n r' at (.*?)>', expand=False)\n df[key + '_ref_f'] = df.apply(\n f, axis=1, args=(key, 'ref', 'factor',))\n df[key + '_ref_d'] = df.apply(\n f, axis=1, args=(key, 'ref', 'delta',))\n df[key + '_ref_set'] = df.apply(f, axis=1, args=(key, 'ref_set',))\n\n # state property\n key = 'state'\n df[key] = df.apply(f, axis=1, args=(key, 'val'))\n df[key + '_set'] = df.apply(f, axis=1, args=(key, 'is_set'))\n\n # fluid 
composition\n for val in self.fluids:\n # fluid mass fraction\n df[val] = df.apply(f, axis=1, args=('fluid', 'val', val))\n\n # fluid mass fraction parametrisation\n df[val + '0'] = df.apply(f, axis=1, args=('fluid', 'val0', val))\n df[val + '_set'] = df.apply(\n f, axis=1, args=('fluid', 'val_set', val))\n\n # fluid balance\n df['balance'] = df.apply(f, axis=1, args=('fluid', 'balance'))\n\n df.to_csv(fn, sep=';', decimal='.', index=False, na_rep='nan')\n logging.debug('Connection information saved to ' + fn + '.')\n\n def save_components(self, path):\n r\"\"\"\n Save the component properties.\n\n - Uses components labels as row identifier.\n - Writes:\n\n - component's incomming and outgoing connections (object id) and\n - component's parametrisation.\n\n Parameters\n ----------\n path : str\n Path/filename for the file.\n \"\"\"\n busses = self.busses.values()\n # create / overwrite csv file\n\n df_comps = self.comps.copy()\n df_comps.set_index('object', inplace=True)\n\n # busses\n df_comps['busses'] = df_comps.apply(\n Network.get_busses, axis=1, args=(busses,))\n\n for var in ['param', 'P_ref', 'char', 'base']:\n df_comps['bus_' + var] = df_comps.apply(\n Network.get_bus_data, axis=1, args=(busses, var))\n\n pd.options.mode.chained_assignment = None\n f = Network.get_props\n for c in df_comps['comp_type'].unique():\n df = df_comps[df_comps['comp_type'] == c]\n\n # basic information\n cols = ['label', 'design', 'offdesign', 'design_path',\n 'local_design', 'local_offdesign']\n for col in cols:\n df[col] = df.apply(f, axis=1, args=(col,))\n\n # attributes\n for col, data in df.index[0].variables.items():\n # component characteristics container\n if isinstance(data, dc_cc) or isinstance(data, dc_cm):\n df[col] = df.apply(\n f, axis=1, args=(col, 'char_func')).astype(str)\n df[col] = df[col].str.extract(r' at (.*?)>', expand=False)\n df[col + '_set'] = df.apply(\n f, axis=1, args=(col, 'is_set'))\n df[col + '_param'] = df.apply(\n f, axis=1, args=(col, 'param'))\n\n # component property container\n elif isinstance(data, dc_cp):\n df[col] = df.apply(f, axis=1, args=(col, 'val'))\n df[col + '_set'] = df.apply(\n f, axis=1, args=(col, 'is_set'))\n df[col + '_var'] = df.apply(\n f, axis=1, args=(col, 'is_var'))\n\n # component property container\n elif isinstance(data, dc_simple):\n df[col] = df.apply(f, axis=1, args=(col, 'val'))\n df[col + '_set'] = df.apply(\n f, axis=1, args=(col, 'is_set'))\n\n # component property container\n elif isinstance(data, dc_gcp):\n df[col] = df.apply(f, axis=1, args=(col, 'method'))\n\n df.set_index('label', inplace=True)\n fn = path + c + '.csv'\n df.to_csv(fn, sep=';', decimal='.', index=True, na_rep='nan')\n logging.debug(\n 'Component information (' + c + ') saved to ' + fn + '.')\n\n def save_busses(self, fn):\n r\"\"\"\n Save the bus properties.\n\n Parameters\n ----------\n fn : str\n Path/filename for the file.\n \"\"\"\n if len(self.busses) > 0:\n df = pd.DataFrame(\n {'id': self.busses.values()}, index=self.busses.values(),\n dtype='object')\n df['label'] = df.apply(Network.get_props, axis=1, args=('label',))\n df['P'] = df.apply(Network.get_props, axis=1, args=('P', 'val'))\n df['P_set'] = df.apply(Network.get_props, axis=1,\n args=('P', 'is_set'))\n df.drop('id', axis=1, inplace=True)\n\n df.set_index('label', inplace=True)\n df.to_csv(fn, sep=';', decimal='.', index=True, na_rep='nan')\n logging.debug('Bus information saved to ' + fn + '.')\n\n def save_characteristics(self, path):\n r\"\"\"\n Save the characteristics.\n\n Parameters\n 
----------\n fn : str\n Path/filename for the file.\n \"\"\"\n # characteristic lines in components\n char_lines = []\n char_maps = []\n for c in self.comps['object']:\n for col, data in c.variables.items():\n if isinstance(data, dc_cc):\n char_lines += [data.char_func]\n elif isinstance(data, dc_cm):\n char_maps += [data.char_func]\n\n # characteristic lines in busses\n for bus in self.busses.values():\n for c in bus.comps.index:\n ch = bus.comps.loc[c, 'char']\n if ch not in char_lines:\n char_lines += [ch]\n\n # characteristic line export\n if len(char_lines) > 0:\n # get id and data\n df = pd.DataFrame(\n {'id': char_lines}, index=char_lines, dtype='object')\n df['id'] = df.apply(Network.get_id, axis=1)\n df['type'] = df.apply(Network.get_class_base, axis=1)\n\n cols = ['x', 'y', 'extrapolate']\n for val in cols:\n df[val] = df.apply(Network.get_props, axis=1, args=(val,))\n\n # write to char.csv\n fn = path + 'char_line.csv'\n df.to_csv(fn, sep=';', decimal='.', index=False, na_rep='nan')\n logging.debug(\n 'Characteristic line information saved to ' + fn + '.')\n\n if len(char_maps) > 0:\n # get id and data\n df = pd.DataFrame(\n {'id': char_maps}, index=char_maps, dtype='object')\n df['id'] = df.apply(Network.get_id, axis=1)\n df['type'] = df.apply(Network.get_class_base, axis=1)\n\n cols = ['x', 'y', 'z']\n for val in cols:\n df[val] = df.apply(Network.get_props, axis=1, args=(val,))\n\n # write to char_map.csv\n fn = path + 'char_map.csv'\n df.to_csv(fn, sep=';', decimal='.', index=False, na_rep='nan')\n logging.debug(\n 'Characteristic map information saved to ' + fn + '.')\n\n @staticmethod\n def get_id(c):\n \"\"\"Return the id of the python object.\"\"\"\n return str(c.name)[str(c.name).find(' at ') + 4:-1]\n\n @staticmethod\n def get_class_base(c):\n \"\"\"Return the class name.\"\"\"\n return c.name.__class__.__name__\n\n @staticmethod\n def get_props(c, *args):\n \"\"\"Return properties.\"\"\"\n if hasattr(c.name, args[0]):\n if (not isinstance(c.name.get_attr(args[0]), int) and\n not isinstance(c.name.get_attr(args[0]), str) and\n not isinstance(c.name.get_attr(args[0]), float) and\n not isinstance(c.name.get_attr(args[0]), list) and\n not isinstance(c.name.get_attr(args[0]), np.ndarray) and\n not isinstance(c.name.get_attr(args[0]), con.Connection)):\n if len(args) == 1:\n return c.name.get_attr(args[0])\n elif args[0] == 'fluid' and args[1] != 'balance':\n return c.name.fluid.get_attr(args[1])[args[2]]\n elif args[1] == 'ref':\n obj = c.name.get_attr(args[0]).get_attr(args[1])\n if obj is not None:\n return obj.get_attr(args[2])\n else:\n return np.nan\n else:\n return c.name.get_attr(args[0]).get_attr(args[1])\n elif isinstance(c.name.get_attr(args[0]), np.ndarray):\n if len(c.name.get_attr(args[0]).shape) > 1:\n return tuple(c.name.get_attr(args[0]).tolist())\n else:\n return c.name.get_attr(args[0]).tolist()\n else:\n return c.name.get_attr(args[0])\n\n @staticmethod\n def get_busses(c, *args):\n \"\"\"Return the list of busses a component is integrated in.\"\"\"\n busses = []\n for bus in args[0]:\n if c.name in bus.comps.index:\n busses += [bus.label]\n return busses\n\n @staticmethod\n def get_bus_data(c, *args):\n \"\"\"Return bus information of a component.\"\"\"\n items = []\n if args[1] == 'char':\n for bus in args[0]:\n if c.name in bus.comps.index:\n val = bus.comps.loc[c.name, args[1]]\n items += [str(val)[str(val).find(' at ') + 4:-1]]\n\n else:\n for bus in args[0]:\n if c.name in bus.comps.index:\n items += [bus.comps.loc[c.name, args[1]]]\n\n return 
items\n" ]
[ [ "pandas.read_csv", "numpy.absolute", "pandas.Series", "numpy.linalg.inv", "numpy.arange", "numpy.linalg.norm", "pandas.DataFrame", "numpy.ones", "numpy.array", "numpy.zeros" ] ]
smittal10/Alfred_OnDevice
[ "7c10663e2056e6dc4dcdc943e8f70602c71089d4" ]
[ "models/nn/transformer.py" ]
[ "\"\"\"\nThe implementation of the Transformer encoder and decoder is adopted from Yu-Hsiang Huang's implementation available in:\nhttps://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Layers.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport numpy as np\n\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n\n def forward(self, q, k, v, mask=None):\n # q: b x n x lq x dv\n # attn: b x n x lq x lv\n\n attn = torch.matmul(q / self.temperature, k.transpose(2, 3))\n\n if mask is not None:\n attn = attn.masked_fill(mask == 0, -1e9)\n\n attn = self.dropout(F.softmax(attn, dim=-1))\n output = torch.matmul(attn, v)\n\n return output, attn\n\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)\n self.fc = nn.Linear(n_head * d_v, d_model, bias=False)\n\n self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)\n\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)\n\n residual = q\n\n # Pass through the pre-attention projection: b x lq x (n*dv)\n # Separate different heads: b x lq x n x dv\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n # Transpose for attention dot product: b x n x lq x dv\n q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1) # For head axis broadcasting.\n\n q, attn = self.attention(q, k, v, mask=mask)\n\n # Transpose to move the head dimension back: b x lq x n x dv\n # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)\n q = self.dropout(self.fc(q))\n q += residual\n\n q = self.layer_norm(q)\n\n return q, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Linear(d_in, d_hid) # position-wise\n self.w_2 = nn.Linear(d_hid, d_in) # position-wise\n self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n\n residual = x\n\n x = self.w_2(F.relu(self.w_1(x)))\n x = self.dropout(x)\n x += residual\n\n x = self.layer_norm(x)\n\n return x\n\n\nclass EncoderLayer(nn.Module):\n ''' Compose with two layers '''\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(EncoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input, 
mask=slf_attn_mask)\n enc_output = self.pos_ffn(enc_output)\n return enc_output, enc_slf_attn\n\n\nclass DecoderLayer(nn.Module):\n ''' Compose with three layers '''\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(DecoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(\n self, dec_input, enc_output,\n slf_attn_mask=None, dec_enc_attn_mask=None):\n dec_output, dec_slf_attn = self.slf_attn(\n dec_input, dec_input, dec_input, mask=slf_attn_mask)\n dec_output, dec_enc_attn = self.enc_attn(\n dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)\n dec_output = self.pos_ffn(dec_output)\n return dec_output, dec_slf_attn, dec_enc_attn\n\n\ndef get_pad_mask(seq, pad_idx):\n return (seq != pad_idx).unsqueeze(-2)\n\n\ndef get_subsequent_mask(seq):\n ''' For masking out the subsequent info. '''\n sz_b, len_s = seq.size()\n subsequent_mask = (1 - torch.triu(\n torch.ones((1, len_s, len_s), device=seq.device), diagonal=1)).bool()\n return subsequent_mask\n\n\n# class PositionalEncoding(nn.Module):\n\n# def __init__(self, d_hid, n_position=200):\n# super(PositionalEncoding, self).__init__()\n\n# # Not a parameter\n# self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))\n\n# def _get_sinusoid_encoding_table(self, n_position, d_hid):\n# ''' Sinusoid position encoding table '''\n# # TODO: make it with torch instead of numpy\n\n# def get_position_angle_vec(position):\n# return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]\n\n# sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])\n# sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\n# sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\n\n# return torch.FloatTensor(sinusoid_table).unsqueeze(0)\n\n# def forward(self, x):\n# return x + self.pos_table[:, :x.size(1)].clone().detach()\n\n\nclass SegmentEncoding(nn.Module):\n\n def __init__(self, d_hid, seg_num):\n super(SegmentEncoding, self).__init__()\n self.seg_emb = nn.Embedding(seg_num, d_hid)\n\n def forward(self, x):\n return x + self.seg_emb[:, :x.size(1)]\n\n\n\nclass Encoder(nn.Module):\n ''' A encoder model with self attention mechanism. '''\n\n def __init__(self, n_layers, n_head, d_model, d_inner, dropout):\n\n super().__init__()\n\n # self.position_enc = PositionalEncoding(d_model, n_position=n_position)\n d_k = d_v = d_model // n_head\n\n self.dropout = nn.Dropout(p=dropout)\n self.layer_stack = nn.ModuleList([\n EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self, src_vec_seq, src_mask):\n\n enc_slf_attn_list = []\n\n # -- Forward\n\n enc_output = self.dropout(src_vec_seq)\n enc_output = self.layer_norm(enc_output)\n\n src_mask = get_pad_mask(src_mask, 0)\n\n for enc_layer in self.layer_stack:\n enc_output, enc_slf_attn = enc_layer(enc_output, slf_attn_mask=src_mask)\n enc_slf_attn_list += [enc_slf_attn]\n\n return enc_output, enc_slf_attn_list\n\n\nclass Decoder(nn.Module):\n ''' A decoder model with self attention mechanism. 
'''\n\n def __init__(self, n_layers, n_head, d_model, d_inner, dropout):\n\n super().__init__()\n\n # self.position_enc = PositionalEncoding(d_model, n_position=n_position)\n self.dropout = nn.Dropout(p=dropout)\n d_k = d_v = d_model // n_head\n self.layer_stack = nn.ModuleList([\n DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self, trg_vec_seq, trg_mask, enc_output, src_mask, return_attns=False):\n\n dec_slf_attn_list, dec_enc_attn_list = [], []\n\n # -- Forward\n dec_output = self.dropout(trg_vec_seq)\n dec_output = self.layer_norm(dec_output)\n\n for dec_layer in self.layer_stack:\n dec_output, dec_slf_attn, dec_enc_attn = dec_layer(\n dec_output, enc_output, slf_attn_mask=trg_mask, dec_enc_attn_mask=src_mask)\n dec_slf_attn_list += [dec_slf_attn] if return_attns else []\n dec_enc_attn_list += [dec_enc_attn] if return_attns else []\n\n if return_attns:\n return dec_output, dec_slf_attn_list, dec_enc_attn_list\n return dec_output,\n\n\nclass Transformer(nn.Module):\n ''' A sequence to sequence model with attention mechanism. '''\n\n def __init__(\n self, n_src_vocab, n_trg_vocab, src_pad_idx, trg_pad_idx,\n d_word_vec=512, d_model=512, d_inner=2048,\n n_layers=6, n_head=8, d_k=64, d_v=64, dropout=0.1, n_position=200,\n trg_emb_prj_weight_sharing=True, emb_src_trg_weight_sharing=True):\n\n super().__init__()\n\n self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx\n\n self.encoder = Encoder(\n n_src_vocab=n_src_vocab, n_position=n_position,\n d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,\n n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,\n pad_idx=src_pad_idx, dropout=dropout)\n\n self.decoder = Decoder(\n n_trg_vocab=n_trg_vocab, n_position=n_position,\n d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,\n n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,\n pad_idx=trg_pad_idx, dropout=dropout)\n\n self.trg_word_prj = nn.Linear(d_model, n_trg_vocab, bias=False)\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n assert d_model == d_word_vec, \\\n 'To facilitate the residual connections, \\\n the dimensions of all module outputs shall be the same.'\n\n self.x_logit_scale = 1.\n if trg_emb_prj_weight_sharing:\n # Share the weight between target word embedding & last dense layer\n self.trg_word_prj.weight = self.decoder.trg_word_emb.weight\n self.x_logit_scale = (d_model ** -0.5)\n\n if emb_src_trg_weight_sharing:\n self.encoder.src_word_emb.weight = self.decoder.trg_word_emb.weight\n\n\n def forward(self, src_seq, trg_seq):\n\n src_mask = get_pad_mask(src_seq, self.src_pad_idx)\n trg_mask = get_pad_mask(trg_seq, self.trg_pad_idx) & get_subsequent_mask(trg_seq)\n\n enc_output, *_ = self.encoder(src_seq, src_mask)\n dec_output, *_ = self.decoder(trg_seq, trg_mask, enc_output, src_mask)\n seq_logit = self.trg_word_prj(dec_output) * self.x_logit_scale\n\n return seq_logit.view(-1, seq_logit.size(2))" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.ones", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.matmul", "torch.nn.init.xavier_uniform_" ] ]
he-actlab/waveq.code
[ "a205d3fa22a41d5f4fc1ef1e5698c4f1dbb11e6a" ]
[ "code/examples/classifier_compression/sinreq_v2_svhn_runcode/run_sinreq-Learn_svhn.py" ]
[ "#!/usr/bin/python\r\n\r\n# developer: Ahmed Taha Elthakeb\r\n# email: ([email protected])\r\n\r\n\"\"\"\r\n[21-oct-2018]\r\n- test case: alexnet \r\n- changing reward function to be func(val_acc + train_acc) on 10k images \r\n[29-Dec-2019]\r\n- SinReQ + Learning the frequency of sinreq regularizer\r\n- adding one more variable to the optimization objectives\r\n\"\"\"\r\n\r\nfrom __future__ import division\r\nimport pandas as pd\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport pickle\r\nimport os\r\n#import dataset\r\n#from . import dataset, networks, load\r\nimport sys\r\nsys.path.append(\"..\")\r\nimport dataset, networks, load, quantize\r\n\r\n#import networks\r\n#from networks import *\r\nfrom networks import helper\r\nfrom networks import alexnet, resnet18, svhn_net, lenet\r\nfrom tensorflow.examples.tutorials.mnist import input_data as mnist_input\r\n#import load\r\nimport json\r\nfrom quantize import quantize_network\r\nimport six\r\nimport csv\r\nimport time \r\nfrom datetime import datetime\r\nimport math\r\npi = math.pi\r\n\"\"\"\r\nfrom __future__ import division\r\nimport pandas as pd\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport pickle\r\nimport os\r\n#import dataset\r\n\r\n#from . import dataset, networks, load\r\nimport sys\r\nsys.path.append(\"..\")\r\nimport dataset, networks, load, quantize\r\n\r\n#import networks\r\n#from networks import *\r\nfrom networks import helper\r\nfrom networks import alexnet, resnet18\r\nfrom tensorflow.examples.tutorials.mnist import input_data as mnist_input\r\n#import load\r\nimport json\r\nfrom quantize import quantize_network\r\nimport six\r\n\"\"\"\r\n# remove!!\r\n#setattr(tf.contrib.rnn.GRUCell, 'deepcopy', lambda self, _: self)\r\n#setattr(tf.contrib.rnn.BasicLSTMCell, 'deepcopy', lambda self, _: self)\r\n#setattr(tf.contrib.rnn.MultiRNNCell, 'deepcopy', lambda self, _: self)\r\n\r\n\r\n#NETWORKS = ['alexnet', 'googlenet', 'nin', 'resnet18', 'resnet50', 'squeezenet', 'vgg16net', 'lenet']\r\nNETWORKS = ['lenet']\r\nIMAGE_PATH_TRAIN = '/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_40K/'\r\nIMAGE_PATH_TEST = '/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_10K/'\r\nIMAGE_LABLE = '/home/ahmed/projects/NN_quant/rlbitwidth.code/val.txt'\r\nCKPT_PATH = '/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/'\r\nNET_ACC = {'alexnet': 79.918, 'googlenet': 89.002, 'nin': 81.218, 'resnet18': 85.016, \r\n\t'resnet50': 91.984, 'squeezenet': 80.346, 'vgg16net': 89.816, 'lenet': 99.06}\r\n\r\nnum_train_examples = 212382\r\nnum_val_examples = 23372\r\nnum_test_examples = 13068\r\nbatch_size_val = 128\r\nbatch_size_train = 32\r\n\r\nclass Donkey(object):\r\n @staticmethod\r\n def _preprocess(image):\r\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\r\n image = tf.multiply(tf.subtract(image, 0.5), 2)\r\n image = tf.reshape(image, [64, 64, 3])\r\n image = tf.random_crop(image, [54, 54, 3])\r\n return image\r\n\r\n @staticmethod\r\n def _read_and_decode(filename_queue):\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_queue)\r\n features = tf.parse_single_example(\r\n serialized_example,\r\n features={\r\n 'image': tf.FixedLenFeature([], tf.string),\r\n 'length': tf.FixedLenFeature([], tf.int64),\r\n 'digits': tf.FixedLenFeature([5], tf.int64)\r\n })\r\n\r\n image = Donkey._preprocess(tf.decode_raw(features['image'], tf.uint8))\r\n length = tf.cast(features['length'], tf.int32)\r\n digits = tf.cast(features['digits'], tf.int32)\r\n return image, length, 
digits\r\n\r\n @staticmethod\r\n def build_batch(path_to_tfrecords_file, num_examples, batch_size, shuffled):\r\n assert tf.gfile.Exists(path_to_tfrecords_file), '%s not found' % path_to_tfrecords_file\r\n\r\n filename_queue = tf.train.string_input_producer([path_to_tfrecords_file], num_epochs=None)\r\n image, length, digits = Donkey._read_and_decode(filename_queue)\r\n\r\n min_queue_examples = int(0.4 * num_examples)\r\n if shuffled:\r\n image_batch, length_batch, digits_batch = tf.train.shuffle_batch([image, length, digits],\r\n batch_size=batch_size,\r\n num_threads=2,\r\n capacity=min_queue_examples + 3 * batch_size,\r\n min_after_dequeue=min_queue_examples)\r\n else:\r\n image_batch, length_batch, digits_batch = tf.train.batch([image, length, digits],\r\n batch_size=batch_size,\r\n num_threads=2,\r\n capacity=min_queue_examples + 3 * batch_size)\r\n return image_batch, length_batch, digits_batch\r\n\r\n\r\ndef eval_imagenet(net_name, param_path, param_q_path, qbits, layer_index, layer_name, file_idx, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=1):\r\n\t\"\"\"all layers are trainable in the conventional retraining procedure\"\"\"\r\n\tif '.ckpt' in param_path:\r\n\t\tnetparams = load.load_netparams_tf(param_path, trainable=True)\r\n\telse:\r\n\t\tnetparams = load.load_netparams_tf_q(param_path, trainable=True)\r\n\r\n\tdata_spec = helper.get_data_spec(net_name)\r\n\tinput_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size, data_spec.crop_size, data_spec.channels))\r\n\tlabel_node = tf.placeholder(tf.int32)\r\n\r\n\tif net_name == 'alexnet_noisy':\r\n\t\tlogits_, err_w, err_b, err_lyr = networks.alexnet_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\telif net_name == 'alexnet':\r\n\t\tif trainable:\r\n\t\t\tlogits_, weights_conv4_tmp = alexnet.alexnet_q_RL(input_node, netparams, qbits, layer_index)\r\n\t\telse:\r\n\t\t\tlogits_ , _ , _ = alexnet.alexnet(input_node, netparams)\r\n\telif net_name == 'alexnet_shift':\r\n\t\tlogits_ = networks.alexnet_shift(input_node, netparams)\r\n\telif net_name == 'googlenet':\r\n\t\tlogits_, err_w, err_b, err_lyr = networks.googlenet_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\telif net_name == 'nin':\r\n\t\tlogits_, err_w, err_b, err_lyr = networks.nin_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\telif net_name == 'resnet18':\r\n\t\tlogits_ = resnet18.resnet18(input_node, netparams)\r\n\t\t#logits_, err_w, err_b, err_lyr = networks.resnet18_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\telif net_name == 'resnet18_shift':\r\n\t\tlogits_ = networks.resnet18_shift(input_node, netparams, shift_back)\r\n\telif net_name == 'resnet50':\r\n\t\tlogits_, err_w, err_b, err_lyr = networks.resnet50_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\telif net_name == 'squeezenet':\r\n\t\tlogits_, err_w, err_b, err_lyr = networks.squeezenet_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\telif net_name == 'vgg16net':\r\n\t\tlogits_, err_w, err_b, err_lyr = networks.vgg16net_noisy(input_node, netparams, err_mean, err_stddev, train_vars)\r\n\t\r\n\t#square = [tf.nn.l2_loss(err_w[layer]) for layer in err_w]\r\n\t#square_sum = tf.reduce_sum(square)\r\n\t#loss_op = tf.reduce_mean(tf.nn.oftmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) + cost_factor / (1. 
+ square_sum)\r\n\t\r\n\t# ======== calculating the quantization error of a certain layer ==========\r\n\tif trainable:\r\n\t\t\"\"\" read the quantized weights (quantized version of the most recent retrained) \"\"\"\r\n\t\tw_q_pickle = param_q_path\r\n\t\twith open(w_q_pickle, 'rb') as f:\r\n\t\t\tparams_quantized = pickle.load(f)\r\n\t\t\r\n\t\tlayer = layer_name\r\n\t\tparams_quantized_layer = tf.get_variable(name='params_quantized_layer', initializer=tf.constant(params_quantized[0][layer]), trainable=False)\r\n\t\t\r\n\t\tq_diff = tf.subtract(params_quantized_layer, netparams['weights'][layer])\r\n\t\tq_diff_cost = tf.nn.l2_loss(q_diff)\r\n\t\tloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) + cost_factor*q_diff_cost\r\n\r\n\t#loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) \r\n\r\n\tprobs = helper.softmax(logits_)\r\n\ttop_k_op = tf.nn.in_top_k(probs, label_node, 5)\r\n\t#optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, epsilon=0.1)\r\n\toptimizer = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=10-8)\r\n\tif trainable:\r\n\t train_op = optimizer.minimize(loss_op)\r\n\tcorrect_pred = tf.equal(tf.argmax(probs, 1), tf.argmax(label_node, 1))\r\n\taccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\tsaver = tf.train.Saver()\r\n\twith tf.Session() as sess:\r\n\t\tsess.run(tf.global_variables_initializer())\r\n\t\tif trainable:\r\n\t\t\tcount = 0\r\n\t\t\tcorrect = 0\r\n\t\t\tcur_accuracy = 0\r\n\t\t\tfor i in range(0, n_epoch):\r\n\t\t\t\t#if cur_accuracy >= NET_ACC[net_name]:\r\n\t\t\t\t\t\t#break\r\n\t\t\t\t#image_producer = dataset.ImageNetProducer(val_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_10K/val_10.txt', data_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_10K', data_spec=data_spec)\r\n\t\t\t\tpath_train = '/home/ahmed/projects/NN_quant/imageNet_training'\r\n\r\n\t\t\t\timage_producer = dataset.ImageNetProducer(val_path=path_train + '/train_shuf_'+str(file_idx)+'.txt', data_path=path_train, data_spec=data_spec)\r\n\t\t\t\t#image_producer = dataset.ImageNetProducer(val_path=path_train + '/train_shuf_100images.txt', data_path=path_train, data_spec=data_spec)\r\n\t\t\t\ttotal = len(image_producer) * n_epoch\r\n\t\t\t\tcoordinator = tf.train.Coordinator()\r\n\t\t\t\tthreads = image_producer.start(session=sess, coordinator=coordinator)\r\n\t\t\t\tfor (labels, images) in image_producer.batches(sess):\r\n\t\t\t\t\tone_hot_labels = np.zeros((len(labels), 1000))\r\n\t\t\t\t\tfor k in range(len(labels)):\r\n\t\t\t\t\t\tone_hot_labels[k][labels[k]] = 1\r\n\t\t\t\t\tsess.run(train_op, feed_dict={input_node: images, label_node: one_hot_labels})\r\n\t\t\t\t\t\r\n\t\t\t\t\t# AHMED: debug\r\n\t\t\t\t\t#netparams_tmp = sess.run(netparams)\r\n\t\t\t\t\t#print('train = ', np.amax(netparams_tmp['weights']['conv2']))\r\n\t\t\t\t\t#print('len set = ', len(set(np.array(netparams['weights']['conv2']))))\r\n\t\t\t\t\t# ------------\r\n\t\t\t\t\t\r\n\t\t\t\t\t#correct += np.sum(sess.run(top_k_op, feed_dict={input_node: images, label_node: labels}))\r\n\t\t\t\t\t# AHMED: modify \r\n\t\t\t\t\t#top, logits_tmp, loss_op_tmp = sess.run([top_k_op, logits_q, loss_op], feed_dict={input_node: images, label_node: labels})\r\n\t\t\t\t\t#top, act_q_tmp, weights_fp_tmp, weights_q_tmp = sess.run([top_k_op, act_, weights_fp, weights_q], feed_dict={input_node: images, label_node: labels})\r\n\t\t\t\t\ttop, weights_conv4_tmp_ret = sess.run([top_k_op, 
weights_conv4_tmp], feed_dict={input_node: images, label_node: labels})\r\n\t\t\t\t\tcorrect += np.sum(top)\r\n\t\t\t\t\t#print(np.amax(weights_q_tmp))\r\n\t\t\t\t\t#print(len(set(weights_q_tmp.ravel())))\r\n\t\t\t\t\t# --------\r\n\t\t\t\t\tcount += len(labels)\r\n\t\t\t\t\tcur_accuracy = float(correct) * 100 / count\r\n\t\t\t\t\twrite_to_csv([count, total, cur_accuracy])\r\n\t\t\t\t\tprint('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))\r\n\t\t\t\tcoordinator.request_stop()\r\n\t\t\t\tcoordinator.join(threads, stop_grace_period_secs=2)\r\n\t\t\t#return sess.run(err_w), cur_accuracy\r\n\t\t\t# \"sess.run\" returns the netparams as normal value (converts it from tf to normal python variable)\r\n\t\t\treturn cur_accuracy, sess.run(netparams)\r\n\t\telse:\r\n\t\t\tcount = 0\r\n\t\t\tcorrect = 0\r\n\t\t\tcur_accuracy = 0\r\n\t\t\tpath_val = './nn_quant_and_run_code_train/ILSVRC2012_img_val'\r\n\t\t\timage_producer = dataset.ImageNetProducer(val_path=path_val + '/val_1k.txt', data_path=path_val, data_spec=data_spec)\r\n\t\t\t#image_producer = dataset.ImageNetProducer(val_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_40K/val_40.txt', data_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_40K', data_spec=data_spec)\r\n\t\t\ttotal = len(image_producer)\r\n\t\t\tcoordinator = tf.train.Coordinator()\r\n\t\t\tthreads = image_producer.start(session=sess, coordinator=coordinator)\r\n\t\t\tfor (labels, images) in image_producer.batches(sess):\r\n\t\t\t\tone_hot_labels = np.zeros((len(labels), 1000))\r\n\t\t\t\tfor k in range(len(labels)):\r\n\t\t\t\t\tone_hot_labels[k][labels[k]] = 1\r\n\t\t\t\t#correct += np.sum(sess.run(top_k_op, feed_dict={input_node: images, label_node: labels}))\r\n\t\t\t\ttop = sess.run([top_k_op], feed_dict={input_node: images, label_node: labels})\r\n\t\t\t\tcorrect += np.sum(top)\r\n\t\t\t\tcount += len(labels)\r\n\t\t\t\tcur_accuracy = float(correct) * 100 / count\r\n\t\t\t\tprint('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))\r\n\t\t\tcoordinator.request_stop()\r\n\t\t\tcoordinator.join(threads, stop_grace_period_secs=2)\r\n\t\t\treturn cur_accuracy, 0\r\n\r\ndef eval_imagenet_q(net_name, param_pickle_path):\r\n\tnetparams = load.load_netparams_tf_q(param_pickle_path)\r\n\tdata_spec = helper.get_data_spec(net_name)\r\n\tinput_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size, data_spec.crop_size, data_spec.channels))\r\n\tlabel_node = tf.placeholder(tf.int32)\r\n\tif net_name == 'alexnet':\r\n\t\tlogits_ = alexnet.alexnet(input_node, netparams)\r\n\telif net_name == 'googlenet':\r\n\t\tlogits_ = networks.googlenet(input_node, netparams)\r\n\telif net_name == 'nin':\r\n\t\tlogits_ = networks.nin(input_node, netparams)\r\n\telif net_name == 'resnet18':\r\n\t\tlogits_ = networks.resnet18(input_node, netparams)\r\n\telif net_name == 'resnet50':\r\n\t\tlogits_ = networks.resnet50(input_node, netparams)\r\n\telif net_name == 'squeezenet':\r\n\t\tlogits_ = networks.squeezenet(input_node, netparams)\r\n\telif net_name == 'vgg16net':\r\n\t\tlogits_ = networks.vgg16net_noisy(input_node, netparams)\r\n\tloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) # \r\n\tprobs = softmax(logits_)\r\n\ttop_k_op = tf.nn.in_top_k(probs, label_node, 5)\r\n\toptimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=0.1)\r\n\tcorrect_pred = tf.equal(tf.argmax(probs, 1), tf.argmax(label_node, 1))\r\n\taccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\tcount = 0\r\n\tcorrect = 
0\r\n\tcur_accuracy = 0\r\n\tsaver = tf.train.Saver()\r\n\twith tf.Session() as sess:\r\n\t\tsess.run(tf.global_variables_initializer())\r\n\t\timage_producer = dataset.ImageNetProducer(val_path=IMAGE_LABLE, data_path=IMAGE_PATH, data_spec=data_spec)\r\n\t\ttotal = len(image_producer)\r\n\t\tcoordinator = tf.train.Coordinator()\r\n\t\tthreads = image_producer.start(session=sess, coordinator=coordinator)\r\n\t\tfor (labels, images) in image_producer.batches(sess):\r\n\t\t\tcorrect += np.sum(sess.run(top_k_op, feed_dict={input_node: images, label_node: labels}))\r\n\t\t\tcount += len(labels)\r\n\t\t\tcur_accuracy = float(correct) * 100 / count\r\n\t\t\tprint('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))\r\n\t\tprint(cur_accuracy)\r\n\t\tcoordinator.request_stop()\r\n\t\tcoordinator.join(threads, stop_grace_period_secs=2)\r\n\treturn cur_accuracy\r\n\r\n\r\n\r\ndef evaluator_svhn(param_path, qbits_dict, init=0):\r\n\tprint('=> Evaluating trained model on val data ...')\r\n\t# ------------------------------------------------------------\r\n\t# ------- evaluator ------------------------------------------\r\n\tbatch_size = batch_size_val\r\n\twith tf.Graph().as_default():\r\n\t\t#ckpt_path = '/home/ahmed/projects/SVHNClassifier/logs/train/latest.ckpt'\r\n\t\t#netparams = load.load_svhn_netparams_tf_q(path, trainable=False)\r\n\r\n\t\tif '.ckpt' in param_path:\r\n\t\t\tnetparams_eval = load.load_svhn_netparams_tf(param_path, trainable=False)\r\n\t\telse:\r\n\t\t\tnetparams_eval = load.load_svhn_netparams_tf_q(param_path, trainable=False)\r\n\r\n\t\r\n\t\t#print(netparams['weights']['hidden1'])\r\n\t\tpath_to_val_tfrecords_file = '/backup/ahmed_projects/SVHNClassifier/data/val.tfrecords'\r\n\r\n\t\tvimage_batch, vlength_batch, vdigits_batch = Donkey.build_batch(path_to_val_tfrecords_file,\r\n\t num_examples=num_val_examples,\r\n\t batch_size=batch_size,\r\n\t shuffled=False)\r\n\t\tvinput_node = vimage_batch\r\n\t\t#vlength_logits, vdigits_logits = svhn_net.svhn_net(vinput_node, netparams_eval)\r\n\t\tvlength_logits, vdigits_logits = svhn_net.svhn_net_q(vinput_node, netparams_eval, qbits_dict)\r\n\r\n\t\tlength_predictions = tf.argmax(vlength_logits, axis=1)\r\n\t\tdigits_predictions = tf.argmax(vdigits_logits, axis=2)\r\n\r\n\t\tneeds_include_length = False\r\n\t\tif needs_include_length:\r\n\t\t\tlabels = tf.concat([tf.reshape(length_batch, [-1, 1]), vdigits_batch], axis=1)\r\n\t\t\tpredictions = tf.concat([tf.reshape(length_predictions, [-1, 1]), digits_predictions], axis=1)\r\n\t\telse:\r\n\t\t\tlabels = vdigits_batch\r\n\t\t\tpredictions = digits_predictions\r\n\r\n\t\tlabels_string = tf.reduce_join(tf.as_string(labels), axis=1)\r\n\t\tpredictions_string = tf.reduce_join(tf.as_string(predictions), axis=1)\r\n\r\n\t\taccuracy, update_accuracy = tf.metrics.accuracy(\r\n\t\t labels=labels_string,\r\n\t\t predictions=predictions_string\r\n\t\t)\r\n\t\tprint(' debug # 2')\r\n\t\tnum_batches = num_val_examples / batch_size_val\r\n\r\n\t\twith tf.Session() as sess:\r\n\t\t\tsess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\r\n\t\t\tcoord = tf.train.Coordinator()\r\n\t\t\tthreads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n \r\n\t\t\tfor _ in range(int(num_batches)):\r\n\t\t\t\tsess.run(update_accuracy)\r\n\t\t\t\t#accuracy_val = sess.run([accuracy])\r\n\t\t\t\t#self.summary_writer.add_summary(summary_val, global_step=global_step)\r\n\t\t\taccuracy_val = 
sess.run([accuracy])\r\n\r\n\t\t\tcoord.request_stop()\r\n\t\t\tcoord.join(threads)\r\n\r\n\treturn accuracy_val\r\n\r\n\r\ndef eval_svhn_net(net_name, qbits_dict={}, layer_index=[], layer_name=[], trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=10, init=0):\r\n\t#ckpt_path = '/backup/ahmed_projects/SVHNClassifier/logs/train/latest.ckpt'\r\n\tckpt_path = '/backup/ahmed_projects/SVHNClassifier/logs/train0/latest.ckpt'\r\n\t#print(\"net parameters: ###########################\")\r\n\t#print(netparams['weights']['hidden1'])\r\n\r\n\tdata_spec = helper.get_data_spec(net_name)\r\n\tinput_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size * data_spec.crop_size * data_spec.channels))\r\n\tinput_node_2d = tf.reshape(input_node, shape=(-1, data_spec.crop_size, data_spec.crop_size, data_spec.channels))\r\n\tlabel_node = tf.placeholder(tf.float32, [None, 10])\r\n\t\r\n\t# -----\r\n\r\n\tif trainable:\r\n\t\tnum_steps_to_show_loss = 100\r\n\t\tnum_steps_to_check = 1000 \r\n\t\twith tf.Graph().as_default():\r\n\t\t\t\"\"\" train from scratch --> random.normal initialization \"\"\"\r\n\t\t\tif init:\r\n\t\t\t netparams = load.init_svhn_netparams_tf(ckpt_path, trainable=True)\r\n\t\t\telse:\r\n\t\t\t netparams = load.load_svhn_netparams_tf(ckpt_path, trainable=True)\r\n\t\t\tprint('loading checkpoint model params ..')\r\n\t\t\tpath_to_train_tfrecords_file = '/backup/ahmed_projects/SVHNClassifier/data/val.tfrecords'\r\n\t\t\tbatch_size = batch_size_train\r\n\r\n\t\t\timage_batch, length_batch, digits_batch = Donkey.build_batch(path_to_train_tfrecords_file,\r\n\t\t\t num_examples=num_train_examples,\r\n\t\t\t batch_size=batch_size,\r\n\t\t\t shuffled=True)\r\n\t\t\t\r\n\t\t\t\"\"\" forward pass \"\"\" \r\n\t\t\tlength_logits, digits_logits = svhn_net.svhn_net(image_batch, netparams)\r\n\t\t\t#length_logits, digits_logits = svhn_net.svhn_net_q(image_batch, netparams, qbits_dict)\r\n\r\n\r\n\t\t\t\"\"\" (1) \"\"\"\r\n\t\t\t\"\"\" learning the frequency \"\"\"\r\n\t\t\tnum_bits = tf.get_variable(name=\"freq\", initializer=8.0, trainable=True)\r\n\t\t\tnum_bits_2 = tf.get_variable(name=\"freq_2\", initializer=8.0, trainable=True)\r\n\t\t\tnum_bits_3 = tf.get_variable(name=\"freq_3\", initializer=8.0, trainable=True)\r\n\t\t\tnum_bits_4 = tf.get_variable(name=\"freq_4\", initializer=8.0, trainable=True)\r\n\t\t\tnum_bits_5 = tf.get_variable(name=\"freq_5\", initializer=8.0, trainable=True)\r\n\t\t\tnum_bits_6 = tf.get_variable(name=\"freq_6\", initializer=8.0, trainable=True)\r\n\t\t\t\"\"\" sinreq regularization \"\"\"\r\n\t\t\t#cost_factor = 30.0 \r\n\t\t\tsin2_func_0 = tf.constant(0.0)\r\n\t\t\tsin2_func_1 = tf.constant(0.0)\r\n\t\t\tsin2_func_2 = tf.constant(0.0)\r\n\t\t\tsin2_func_3 = tf.constant(0.0)\r\n\t\t\tsin2_func_4 = tf.constant(0.0)\r\n\t\t\tsin2_func_5 = tf.constant(0.0)\r\n\t\t\tsin2_func_6 = tf.constant(0.0)\r\n\t\t\tsin2_func_7 = tf.constant(0.0)\r\n\t\t\tsin2_func_8 = tf.constant(0.0)\r\n\t\t\tsin2_func_9 = tf.constant(0.0)\r\n\t\t\tsin2_func_10 = tf.constant(0.0)\r\n\r\n\t\t\t\"\"\" (2) \"\"\"\r\n\t\t\tlambda_q = tf.constant(0.0)\r\n\t\t\tlambda_f = tf.constant(0.0)\r\n\t\t\tfreq_loss = tf.constant(0.0)\r\n\t\t\tsin2_loss = tf.constant(0.0)\r\n\t\t\tsin2_loss_2 = tf.constant(0.0)\r\n\t\t\tsin2_loss_3 = tf.constant(0.0)\r\n\t\t\tsin2_loss_4 = tf.constant(0.0)\r\n\t\t\tsin2_loss_5 = tf.constant(0.0)\r\n\t\t\tsin2_loss_6 = tf.constant(0.0)\r\n\r\n\t\t\t\"\"\" (3) \"\"\"\r\n\t\t\tlayer_name = 'hidden1'\r\n\t\t\tqbits = 
qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t#sin2_func_0 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t\" R0(w,b)\" \r\n\t\t\t\t#sin2_loss = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-tf.identity(num_bits)))))) \r\n\t\t\t\t\" R1(w,b)\" \r\n\t\t\t\tsin2_loss = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits)-1)))/2**(num_bits)) \r\n\t\t\t\t\" R2(w,b)\" \r\n\t\t\t\t#sin2_loss = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits)-1)))/4**(num_bits)) \r\n\t\t\t\t\r\n\t\t\tlayer_name = 'hidden2'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\r\n\t\t\t\t#sin2_loss = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(num_bits)))))) \r\n\t\t\t\t\" R0(w,b)\" \r\n\t\t\t\t#sin2_loss_2 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-tf.identity(num_bits_2)))))) \r\n\t\t\t\t\" R1(w,b)\" \r\n\t\t\t\tsin2_loss_2 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_2)-1)))/2**(num_bits_2)) \r\n\t\t\t\t\" R2(w,b)\" \r\n\t\t\t\t#sin2_loss_2 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_2)-1)))/4**(num_bits_2)) \r\n\t\t\t\t\r\n\t\t\tlayer_name = 'hidden3'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t#sin2_func_2 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\r\n\t\t\t\t\" R0(w,b)\" \r\n\t\t\t\t#sin2_loss_3 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-tf.identity(num_bits_3)))))) \r\n\t\t\t\t\" R1(w,b)\" \r\n\t\t\t\tsin2_loss_3 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_3)-1)))/2**(num_bits_3)) \r\n\t\t\t\t\" R2(w,b)\" \r\n\t\t\t\t#sin2_loss_3 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_3)-1)))/4**(num_bits_3)) \r\n\t\t\t\tfreq_loss_3 = num_bits_3\r\n\t\t\t\t\r\n\t\t\tlayer_name = 'hidden4'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t#sin2_func_3 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t\" R0(w,b)\" \r\n\t\t\t\t#sin2_loss_4 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-tf.identity(num_bits_4)))))) \r\n\t\t\t\t\" R1(w,b)\" 
\r\n\t\t\t\tsin2_loss_4 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_4)-1)))/2**(num_bits_4)) \r\n\t\t\t\t\" R2(w,b)\" \r\n\t\t\t\t#sin2_loss_4 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_4)-1)))/4**(num_bits_4)) \r\n\t\t\t\t\r\n\t\t\tlayer_name = 'hidden5'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t#sin2_func_4 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t\" R0(w,b)\" \r\n\t\t\t\t#sin2_loss_5 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-tf.identity(num_bits_5)))))) \r\n\t\t\t\t\" R1(w,b)\" \r\n\t\t\t\tsin2_loss_5 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_5)-1)))/2**(num_bits_5)) \r\n\t\t\t\t\" R2(w,b)\" \r\n\t\t\t\t#sin2_loss_5 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_5)-1)))/4**(num_bits_5)) \r\n\t\r\n\t\t\tlayer_name = 'hidden6'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t#sin2_func_5 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\t\t\" R0(w,b)\" \r\n\t\t\t\t#sin2_loss_6 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-tf.identity(num_bits_6)))))) \r\n\t\t\t\t\" R1(w,b)\" \r\n\t\t\t\tsin2_loss_6 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_6)-1)))/2**(num_bits_6)) \r\n\t\t\t\t\" R2(w,b)\" \r\n\t\t\t\t#sin2_loss_6 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]*(2**(num_bits_6)-1)))/4**(num_bits_6)) \r\n\t\r\n\r\n\t\t\tlayer_name = 'hidden7'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\tsin2_func_6 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\tlayer_name = 'hidden8'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\tsin2_func_7 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\tlayer_name = 'hidden9'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\tsin2_func_8 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = 
tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\t\t\tlayer_name = 'hidden10'\r\n\t\t\tqbits = qbits_dict[layer_name]\r\n\t\t\tif qbits < 8:\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))\r\n\t\t\t\tsin2_func_9 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))\r\n\t\t\t\t#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))\r\n\r\n\r\n\t\t\t\"\"\" ------------------------------------------------ \"\"\"\r\n\r\n\t\t\t# loss calculation \r\n\t\t\tlength_labels = length_batch\r\n\t\t\tdigits_labels = digits_batch\r\n\t\t\tlength_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=length_labels, logits=length_logits))\r\n\t\t\tdigit1_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 0], logits=digits_logits[:, 0, :]))\r\n\t\t\tdigit2_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 1], logits=digits_logits[:, 1, :]))\r\n\t\t\tdigit3_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 2], logits=digits_logits[:, 2, :]))\r\n\t\t\tdigit4_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 3], logits=digits_logits[:, 3, :]))\r\n\t\t\tdigit5_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 4], logits=digits_logits[:, 4, :]))\r\n\t\t\t#loss_sin2_reg = cost_factor*(sin2_func_1 + sin2_func_2 + sin2_func_3 + sin2_func_4)\r\n\t\t\t#loss_sin2_reg = cost_factor*(sin2_func_0 + sin2_func_1 + sin2_func_2 + sin2_func_3 + sin2_func_4 + sin2_func_5 + sin2_func_6 + sin2_func_7 + sin2_func_8 + sin2_func_9)\r\n\t\t\tloss_sin2_reg = lambda_q * cost_factor * (sin2_loss + sin2_loss_2 + sin2_loss_3 + sin2_loss_4 + sin2_loss_5 + sin2_loss_6) \r\n\t\t\tloss_freq_reg = lambda_f * 1 * (num_bits + num_bits_2 + num_bits_3 + num_bits_4 + num_bits_5 + num_bits_6)\r\n\t\t\t#loss_freq_reg = lambda_f * cost_factor * (freq_loss + freq_loss_2)\r\n\r\n\t\t\t#loss_op = length_cross_entropy + digit1_cross_entropy + digit2_cross_entropy + digit3_cross_entropy + digit4_cross_entropy + digit5_cross_entropy + loss_sin2_reg + loss_freq_reg \r\n\t\t\tacc_loss = length_cross_entropy + digit1_cross_entropy + digit2_cross_entropy + digit3_cross_entropy + digit4_cross_entropy + digit5_cross_entropy \r\n\t\t\tloss_op = acc_loss + loss_sin2_reg + loss_freq_reg \r\n\r\n\t\t\tglobal_step = tf.Variable(0, name='global_step', trainable=False)\r\n\t\t\ttraining_options = {}\r\n\t\t\ttraining_options['learning_rate'] = 1e-2\r\n\t\t\ttraining_options['decay_steps'] = 10000\r\n\t\t\ttraining_options['decay_rate'] = 0.9\r\n\t\t\tlearning_rate = tf.train.exponential_decay(training_options['learning_rate'], global_step=global_step,\r\n decay_steps=training_options['decay_steps'], decay_rate=training_options['decay_rate'], staircase=True)\r\n\t\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate)\r\n\t\t\t#optimizer = tf.train.AdamOptimizer(learning_rate=0.001)\r\n\t\t\ttrain_op = optimizer.minimize(loss_op, global_step=global_step)\r\n\t\t\t\r\n\t\t\twith tf.Session() as sess:\r\n\t\t\t\tsess.run(tf.global_variables_initializer())\r\n\t\t\t\tcoord = tf.train.Coordinator()\r\n\t\t\t\tthreads = tf.train.start_queue_runners(sess=sess, coord=coord) \r\n\t\t\t\tsaver = 
tf.train.Saver()\r\n\r\n\t\t\t\t\r\n\t\t\t\tprint('=> Start training ..')\r\n\t\t\t\tprint('########################################')\r\n\r\n\t\t\t\t#cur_accuracy = 0\r\n\t\t\t\t#patience = initial_patience\r\n\t\t\t\tbest_accuracy = 0.0\r\n\t\t\t\tduration = 0.0\r\n\r\n\t\t\t\tfor i in range(0, n_epoch): \r\n\t\t\t\t\tstep = i\r\n\t\t\t\t\tn_steps = n_epoch\r\n\t\t\t\t\t\r\n\t\t\t\t\t#print(' debug # 0')\r\n\t\t\t\t\t\" Method 0 \"\r\n\t\t\t\t\t#lambda_q_value = (1/2)*np.exp(i*2/n_epoch) # rising1\r\n\t\t\t\t\t#lambda_f_value = (1/2)*np.exp(i*2/n_epoch) # rising1\r\n\t\t\t\t\t#lambda_f_value = 0.01\r\n\r\n\t\t\t\t\t\"Method 2: step-like lambda \"\r\n\t\t\t\t\tr = 0.2*n_epoch\r\n\t\t\t\t\td = 0.8*n_epoch\r\n\t\t\t\t\ts = 20\r\n\t\t\t\t\tf1 = 0.5 * (1+np.tanh((i-r)/s));\r\n\t\t\t\t\tf2 = 0.5 * (1+np.tanh((i-d)/s));\r\n\t\t\t\t\tlambda_q_value = f1\r\n\t\t\t\t\t#lambda_f_value = 0.02*(f1-f2)\r\n\t\t\t\t\tlambda_f_value = 0.03\r\n\r\n\t\t\t\t\t#\"Method 1: old method \"\r\n\t\t\t\t\t#scale = 3\r\n\t\t\t\t\t#shift = 2\r\n\t\t\t\t\t#lambda_q_value = 0.5*(np.tanh(pi*scale*(step-n_steps/shift)/n_steps)-np.tanh(pi*scale*(0-n_steps/shift)/n_steps));\r\n\t\t\t\t\t#scale = 3\r\n\t\t\t\t\t#shift = 2\r\n\t\t\t\t\t#lambda_f_value = 1/(np.cosh(pi*scale*(step-n_steps/shift)/n_steps))\r\n\t\t\t\t\t\r\n\t\t\t\t\tstart_time = time.time()\r\n\t\t\t\t\t#_, loss_val, global_step_val = sess.run([train_op, loss_op, global_step])\r\n\t\t\t\t\t_, loss_val, acc_loss_val, loss_sin2_reg_val, loss_freq_reg_val, global_step_val = sess.run([train_op, loss_op, acc_loss, loss_sin2_reg, loss_freq_reg, global_step], feed_dict={lambda_q: lambda_q_value, lambda_f: lambda_f_value})\r\n\t\t\t\t\tsin2_l, sin2_l_2, sin2_l_3, sin2_l_4, sin2_l_5, sin2_l_6 = sess.run([sin2_loss, sin2_loss_2, sin2_loss_3, sin2_loss_4, sin2_loss_5, sin2_loss_6]) \r\n\t\t\t\t\tn_bits , n_bits_2 , n_bits_3 , n_bits_4 , n_bits_5 , n_bits_6 = sess.run([num_bits , num_bits_2 , num_bits_3 , num_bits_4 , num_bits_5 , num_bits_6])\r\n\t\t\t\t\tduration += time.time() - start_time\r\n\t\t\t\t\t\r\n\t\t\t\t\t#print('=> %s: step %d, loss = %f ' % (\r\n\t\t\t\t\t# \tdatetime.now(), global_step_val, loss_val))\r\n\r\n\t\t\t\t\tprint('=> %s: step %d, total_loss = %f, sin2_reg_loss = %f , freq_reg_loss = %f' % (\r\n\t\t\t\t\t \tdatetime.now(), global_step_val, loss_val, loss_sin2_reg_val, loss_freq_reg_val))\r\n\t\t\t\t\tprint(\"lambda_q_value = \", lambda_q_value)\r\n\t\t\t\t\tprint('=> l1 = %f , l2 = %f , l3 = %f , l4 = %f , l5 = %f , l6 = %f' % (n_bits , n_bits_2 , n_bits_3 , n_bits_4 , n_bits_5 , n_bits_6))\r\n\t\t\t\t\t\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\t\tif global_step_val % num_steps_to_show_loss == 0:\r\n\t\t\t\t\t\texamples_per_sec = batch_size * num_steps_to_show_loss / duration\r\n\t\t\t\t\t\tduration = 0.0\r\n\t\t\t\t\t\tprint('=> %s: step %d, loss = %f (%.1f examples/sec)' % (\r\n\t\t\t\t\t \tdatetime.now(), global_step_val, loss_val, examples_per_sec))\r\n\r\n\t\t\t\t\tif global_step_val % num_steps_to_check != 0:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\t\t#_, loss_val = sess.run([train_op, loss_op])\r\n\t\t\t\t\twrite_to_data([lambda_q_value, lambda_f_value, loss_val, acc_loss_val, loss_sin2_reg_val, loss_freq_reg_val, \\\r\n\t\t\t\t\t\t\tsin2_l, sin2_l_2, sin2_l_3, sin2_l_4, sin2_l_5, sin2_l_6, \\\r\n\t\t\t\t\t\t\t n_bits, n_bits_2, n_bits_3, n_bits_4, n_bits_5, n_bits_6])\r\n\t\t\t\t\tprint('---------------- finished epoch# ', i)\r\n\t\t\t\t\r\n\t\t\t\t\tnetparams_save = sess.run(netparams)\r\n\t\t\t\t\tprint(' Training 
finished')\r\n\r\n\t\t\t\t\t\"\"\" path for saving the retrained model \"\"\"\r\n\t\t\t\t\tnetwork_name = 'svhn_net'\r\n\t\t\t\t\tpath_save = './results_retrained_models/' + network_name + '/quantized/' + network_name\r\n\t\t\t\t\tpath_save_params = path_save + '_retrained_lambda_'+str(cost_factor)+'_CONV.pickle'\r\n\t\t\t\t\t# AHMED: debug\r\n\t\t\t\t\t#print('retrained = ', np.amax(netparams['weights']['conv2']))\r\n\t\t\t\t\t#print('len set = ', len(set(np.array(netparams['weights']['conv2']))))\r\n\t\t\t\t\t# ------------\r\n\t\t\t\t\t#print('=================================================')\r\n\t\t\t\tprint('=> Writing trained model parameters ...')\r\n\t\t\t\t#print('=================================================')\r\n\t\t\t\tprint(len(netparams_save['weights']))\r\n\t\t\t\tprint(netparams_save['weights'].keys())\r\n\t\t\t\twith open(path_save_params, 'wb') as f:\r\n\t\t\t\t\tpickle.dump(netparams_save, f)\r\n\r\n\t\t\t\tprint('=> Evaluating on validation dataset...')\r\n\t\t\t\taccuracy_val = evaluator_svhn(path_save_params, qbits_dict)\r\n\t\t\t\tprint('epoch #', i)\r\n\t\t\t\tprint('accuracy', accuracy_val)\r\n\t\t\t\tcoord.request_stop()\r\n\t\t\t\tcoord.join(threads)\t\r\n\t\t\t\treturn accuracy_val\t\r\n\r\n\r\n\telse: # inference \r\n\t\tnetparams = load.load_svhn_netparams_tf(ckpt_path, trainable=False)\r\n\t\tpath_to_val_tfrecords_file = '/home/ahmed/projects/SVHNClassifier/data/val.tfrecords'\r\n\t\tbatch_size = batch_size_val\r\n\r\n\t\taccuracy_val = evaluator_svhn(ckpt_path, qbits_dict)\r\n\t\treturn accuracy_val\r\n\t\t\t\r\ndef eval_lenet(net_name, param_path, qbits, layer_index, layer_name=[], trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=1):\r\n\t#netparams = load.load_netparams_tf(ckpt_path, trainable=False)\r\n\r\n\tif '.ckpt' in param_path:\r\n\t\tnetparams = load.load_netparams_tf(param_path, trainable=trainable)\r\n\telse:\r\n\t\tnetparams = load.load_netparams_tf_q(param_path, trainable=trainable)\r\n\t\r\n\tdata_spec = helper.get_data_spec(net_name)\r\n\tinput_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size * data_spec.crop_size * data_spec.channels))\r\n\tinput_node_2d = tf.reshape(input_node, shape=(-1, data_spec.crop_size, data_spec.crop_size, data_spec.channels))\r\n\tlabel_node = tf.placeholder(tf.float32, [None, 10])\r\n\t#logits_, err_w, err_b, err_lyr = lenet.lenet_noisy(input_node_2d, netparams, err_mean, err_stddev, train_vars)\r\n\t#logits_ = lenet.lenet_quantized(input_node_2d, netparams, qbits)\r\n\tif trainable:\r\n\t\t#logits_ = lenet.lenet_q_RL(input_node_2d, netparams, qbits, layer_index)\r\n\t\tlogits_, ret = lenet.lenet_quantized(input_node_2d, netparams, qbits)\r\n\telse:\r\n\t\t#logits_, ret = lenet.lenet_quantized(input_node_2d, netparams, qbits)\r\n\t\tlogits_, ret = lenet.lenet(input_node_2d, netparams)\r\n\t#square = [tf.nn.l2_loss(err_w[layer]) for layer in err_w]\r\n\t#square_sum = tf.reduce_sum(square)\r\n\t#loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) + cost_factor / (1. 
+ square_sum)\r\n\tloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) \r\n\toptimizer = tf.train.AdamOptimizer(learning_rate=0.001)\r\n\tif trainable:\r\n\t\ttrain_op = optimizer.minimize(loss_op)\r\n\tprobs = helper.softmax(logits_)\r\n\tcorrect_pred = tf.equal(tf.argmax(probs, 1), tf.argmax(label_node, 1))\r\n\taccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\t#mnist = mnist_input.read_data_sets(\"/tmp/data/\", one_hot=True)\r\n\tmnist = mnist_input.read_data_sets(\"/home/ahmed/mnist\", one_hot=True)\r\n\t#print('############################################')\r\n\t#[print(n.name) for n in tf.get_default_graph().as_graph_def().node]\r\n\twith tf.Session() as sess:\r\n\t\tsess.run(tf.global_variables_initializer())\r\n\t\tsaver = tf.train.Saver()\r\n\t\t# saving a checkpoint --------------------------------------------------------------------\r\n\t\tsaver.save(sess, 'lenet_save_ckpt/my-model-10000') # will generate my-model-10000.meta\r\n\t\tsaver.export_meta_graph('lenet_save_ckpt/my-model-10000.meta') # not need\r\n\t\t# ----------------------------------------------------------------------------------------\r\n\t\tcur_accuracy = 0\r\n\t\tfor i in range(0, n_epoch):\r\n\t\t\t#if cur_accuracy >= NET_ACC[net_name]:\r\n\t\t\t\t\t#break\r\n\t\t\tif trainable:\r\n\t\t\t\tfor step in range(0, int(mnist.train.num_examples/data_spec.batch_size)):\r\n\t\t\t\t\tbatch_x, batch_y = mnist.train.next_batch(data_spec.batch_size)\r\n\t\t\t\t\t#print(\"batch_x -------------------------------\") \r\n\t\t\t\t\t#print(batch_x)\r\n\t\t\t\t\t#print(\"batch_y -------------------------------\") \r\n\t\t\t\t\t#print(batch_y)\r\n\t\t\t\t\tsess.run(train_op, feed_dict={input_node: batch_x, label_node: batch_y})\r\n\t\t\t\t\t#loss, acc = sess.run([loss_op_1, accuracy], feed_dict={input_node: batch_x, label_node: batch_y})\r\n\t\t\t\tprint('epoch# {:>6} finished\\n', i)\r\n\t\t\t#cur_accuracy = 100 * (sess.run(accuracy, feed_dict={input_node: mnist.test.images[:], label_node: mnist.test.labels[:]}))\r\n\t\t\t#cur_accuracy, ret_tf = (sess.run([accuracy,ret], feed_dict={input_node: mnist.test.images[:], label_node: mnist.test.labels[:]}))\r\n\t\t\tprint(\"mnist.test.images --------------------------------\")\r\n\t\t\tprint((mnist.test.images[:]).shape)\r\n\t\t\tnp.save(\"image_1x781\",mnist.test.images[0])\r\n\t\t\t#print(\"mnist.test.labels --------------------------------\")\r\n\t\t\t#print(mnist.test.labels[0])\r\n\t\t\tcur_accuracy, ret_tf = (sess.run([accuracy,ret], feed_dict={input_node: mnist.test.images[0:1], label_node: mnist.test.labels[0:1]}))\r\n\t\t\tcur_accuracy = 100 * cur_accuracy \r\n\t\t\tprint('################################')\r\n\t\t\t#print(set(ret_tf.ravel()))\r\n\t\t\tnp.set_printoptions(precision=4)\r\n\t\t\tprint((ret_tf))\r\n\t\t\tnp.save(\"image_test\", ret_tf)\r\n\t\t\t#print((ret_tf[0].shape))\r\n\t\t\t#print(np.sum(ret_tf[0]))\r\n\t\t\t#print(np.max(ret_tf[0]))\r\n\t\t\t#print(np.min(ret_tf[0]))\r\n\t\t\t#print(np.mean(ret_tf[0]))\r\n\t\t\t#print(np.var(ret_tf[0]))\r\n\t\t\tprint('{:>6}/{:<6} {:>6.2f}%'.format(i, n_epoch, cur_accuracy))\r\n\t\tprint('Final Test Accuracy = \\t' + (str)(cur_accuracy))\r\n\t\treturn cur_accuracy, sess.run(netparams)\r\n\t\t\t\t\r\ndef run_network(net_name, cost_factor, n_epoch):\r\n\tckpt_path = CKPT_PATH + net_name + '/' + net_name + '.ckpt'\r\n\terr_mean = [0.0, 0.0, 0.0, 0.0] #order: input, weights, biases, layers\r\n\terr_stddev = [0.0, 0.0, 0.0, 0.0]\r\n\ttrain_vars = [False, True, 
False, False]\r\n\tistrain = True\r\n\tif net_name == 'lenet':\r\n\t\treturn eval_lenet(net_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)\r\n\telse:\r\n\t\treturn eval_imagenet(net_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)\r\n\r\ndef gen_max_noise_dist():\r\n\tmax_epoch = 5\r\n\tfor net_name in NETWORKS:\r\n\t\tdirectory = '/home/ahmed/projects/NN_quant/results/networks/' + net_name\r\n\t\tif not os.path.exists(directory):\r\n\t\t\tos.makedirs(directory)\r\n\t\tcurrent_factor = 10\r\n\t\tlargest_correct = 0\r\n\t\tsmallest_wrong = 0\r\n\t\tfor i in range(0, 10):\r\n\t\t\ttf.reset_default_graph()\r\n\t\t\terr_w, accuracy = run_network(net_name, current_factor, max_epoch)\r\n\t\t\tif accuracy >= NET_ACC[net_name]:\r\n\t\t\t\tsave_path = directory + '/' + (str)(current_factor) + '_' + (str)(accuracy)\r\n\t\t\t\twith open(save_path, 'w') as f:\r\n\t\t\t\t\tpickle.dump(err_w, f)\r\n\t\t\t\tlargest_correct = current_factor\r\n\t\t\t\tif smallest_wrong == 0:\r\n\t\t\t\t\tcurrent_factor = current_factor * 2\r\n\t\t\t\telse:\r\n\t\t\t\t\tcurrent_factor = (current_factor + smallest_wrong) / 2.\r\n\t\t\telse:\r\n\t\t\t\tsmallest_wrong = current_factor\r\n\t\t\t\tcurrent_factor = (current_factor + largest_correct) / 2.\r\n\r\ndef gen_noise_dist(net_name, cost_factor, count, n_epoch):\r\n\tdirectory = '/home/ahmed/projects/NN_quant/results/deltas/' + net_name\r\n\tif not os.path.exists(directory):\r\n\t\tos.makedirs(directory) \r\n\tfor i in range(0, count):\r\n\t\ttf.reset_default_graph()\r\n\t\terr_w, accuracy = run_network(net_name, cost_factor, n_epoch)\r\n\t\t#save_path = directory + '/' + (str)(cost_factor) + '_' + (str)(accuracy)\r\n\t\tsave_path = directory + '/' + (str)(i) + '_' + (str)(accuracy)\r\n\t\twith open(save_path, 'w') as f:\r\n\t\t\tpickle.dump(err_w, f)\r\n\r\n'''\r\npath_net = '/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/resnet18/resnet18.py'\r\nlayers = load.get_layers(path_net)\r\nacc = {}\r\nfor i in range(0, len(layers)):\r\n\tpath = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18_10_' + layers[i] + '_7.pickle'\r\n\ttf.reset_default_graph()\r\n\tacc[layers[i]] = eval_imagenet_q('resnet18', path)\r\n\tprint(\"\\n\\n\")\r\n\tprint(str(i) + \"/\" + str(len(layers)))\r\n\tprint(acc)\r\nprint(acc)\r\n'''\r\n\r\ndef eval_normalized_layers():\r\n\tacc = {}\r\n\tcount = 1\r\n\tfor dirpath, subdirs, fileList in os.walk('/home/ahmed/projects/NN_quant/results/normalized/resnet18/'):\r\n\t\tfor filename in fileList:\r\n\t\t\taddr = (os.path.join(dirpath, filename))\r\n\t\t\ttf.reset_default_graph()\r\n\t\t\tprint()\r\n\t\t\tprint(count)\r\n\t\t\tprint(filename)\r\n\t\t\tprint()\r\n\t\t\tcount = count + 1\r\n\t\t\tacc[filename] = eval_imagenet_q('resnet18', addr)\r\n\tprint(acc)\r\n\twith open('out.txt', 'w') as outfile:\r\n\t\toutfile.write(json.dumps(acc))\r\n\r\n#layers_sorted = load.get_layers('/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/resnet18/resnet18.py')\r\nshift_back = {}\r\n#for layer in layers_sorted:\r\n#\tshift_back[layer] = 0\r\n\r\n\r\n#print('==================================================================')\r\n#print('TRAINING')\r\n#print('==================================================================')\r\n\r\n'''\r\n# this is for phase I training - retrain a little bit on new dataset 40K - get 
Wo'\r\n#param_path = '/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.ckpt' # = {Wo}\r\n# this is for phase II training - retrain to minimize the quantization error - >> get W1\r\n\r\n#path_save = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18'\r\n#path_save_q = path_save + '_layers_shift_quant_10May.pickle'\r\n#param_path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/May12_resnet18_10_fc1000_5_bits.pickle'\r\nparam_path = Wo_resent18\r\nsave_path_params = path_save + '_layers_shift_quant_retrain_A_10May.pickle'\r\n\r\nacc, netparams = eval_imagenet('resnet18', param_path, shift_back, trainable=True, err_mean=None, err_stddev=None, train_vars=None, cost_factor=800., n_epoch=1)\r\nprint(acc)\r\nwith open(save_path_params, 'w') as f:\r\n\tpickle.dump(netparams, f)\r\n'''\r\n\r\ndef get_stats(network_name):\r\n\t# get weights \r\n\tnetparams = load.get_netparams('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.ckpt')\r\n\tweights = netparams['weights']\r\n\t# get layers\r\n\tlayers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.py')\r\n\ttot_num_layers = len(layers_sorted)\r\n\tcols = ['layer_idx_norm', 'n', 'c', 'k', 'std']\r\n\ttmp_lst = []\r\n\tfor i, layer in enumerate(layers_sorted, start=1):\r\n\t\tlayer_shape = weights[layer].shape\r\n\t\tif len(layer_shape) == 2:\r\n\t\t\tk = 0\r\n\t\t\tn, c = layer_shape\r\n\t\telse:\r\n\t\t\tk, _, n, c = layer_shape\r\n\t\tweights_layer = weights[layer].ravel()\r\n\t\tidx_norm = i/tot_num_layers\r\n\t\tstd = np.var(weights_layer)\r\n\t\ttmp_lst.append([idx_norm, n, c, k, std])\r\n\r\n\tdf = pd.DataFrame(tmp_lst, columns=cols)\r\n\treturn df # to access --> df.loc[i, 'std'] \r\n\r\n\r\ndef quantize_and_run(qbits):\r\n\r\n\tinput_file = './nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.ckpt'\r\n\t\r\n\t\"\"\" Quantization \"\"\"\r\n\tnbits = 16\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/alexnet/'\r\n\tpath_save_q = path_save + 'alexnet_layers_quant_'+ str(nbits) +'-bits_23Sep.pickle'\r\n\t#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')\r\n\tlayers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')\r\n\t#bits_q = [nbits] * len(layers_sorted)\r\n\tbits_q = qbits\r\n\tpath_params = input_file\r\n\tquantize_network(path_params, layers_sorted, path_save_q, bits_q)\r\n\r\n\tprint('==================================================================')\r\n\tprint('INFERENCE')\r\n\tprint('==================================================================')\r\n\t\r\n\t\"\"\" Run Inference \"\"\"\r\n\t#path_save_q = path_save + '_layers_shift_quant_10May.pickle'\r\n\t#param_path = save_path_params\r\n\t#param_path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18_layers_shift_quant_retrain_A_10May.pickle'\r\n\tparam_path = path_save_q\r\n\twith tf.Graph().as_default():\r\n\t\tacc, netparams = eval_imagenet('alexnet', param_path, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=0., n_epoch=1)\r\n\r\n\treturn acc\r\n\r\ndef quantize_and_train(network_name, layer_index, layer_name, qbits, init_params, file_idx):\r\n\t\"\"\" full precision \"\"\"\r\n\t#input_file = 
'./rlbitwidth.tfmodels/caffe2tf/tfmodels/' + network_name + '/' + network_name + '.ckpt'\r\n\t\r\n\tprint('==================================================================')\r\n\tprint('Quantization')\r\n\tprint('==================================================================')\r\n\t\r\n\t\"\"\" Quantization \"\"\"\r\n\t\"\"\" 1) we initialize based on the quantized input pattern (?) \"\"\"\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/'\r\n\tpath_save_q = path_save + 'train_1_init_' + network_name + '_layers_quant_17Oct.pickle'\r\n\tlayers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/' + network_name + '/' + network_name + '.py')\r\n\r\n\t\"\"\" always start with the most recent retrained model \"\"\"\r\n\tpath_params = init_params\r\n\r\n\tquantize_network(path_params, layers_sorted, path_save_q, qbits)\r\n\r\n\tprint('==================================================================')\r\n\tprint('TRAINING')\r\n\tprint('==================================================================')\r\n\t\r\n\t\"\"\" Run retraining \"\"\"\r\n\t\"\"\" use the full precision weights for initialization, or the most recent retrained \"\"\"\r\n\t\"\"\" this is used to calculate the quantization difference regularizer \"\"\"\r\n\tparam_path = init_params\r\n\tparam_q_path = path_save_q\r\n\r\n\twith tf.Graph().as_default():\r\n\t\tacc, netparams = eval_imagenet(network_name, param_path, param_q_path, qbits, layer_index, layer_name, file_idx, shift_back, trainable=True, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=1)\r\n\tprint(acc)\r\n\r\n\t\"\"\" path for saving the retrained model \"\"\"\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name\r\n\tpath_save_params = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'\r\n\t\r\n\t# AHMED: debug\r\n\t#print('retrained = ', np.amax(netparams['weights']['conv2']))\r\n\t#print('len set = ', len(set(np.array(netparams['weights']['conv2']))))\r\n\t# ------------\r\n\t\r\n\twith open(path_save_params, 'wb') as f:\r\n\t\tpickle.dump(netparams, f)\r\n\r\n\tprint('==================================================================')\r\n\tprint('TRAINING DONE!')\r\n\tprint('==================================================================')\r\n\t\r\n\r\ndef quantize_and_run_any(network, qbits):\r\n\r\n\tprint('network:', network)\r\n\tinput_file = './nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/' + network +'/' + network +'.ckpt'\r\n\t\r\n\r\n\tprint('==================================================================')\r\n\tprint('Quantization')\r\n\tprint('==================================================================')\r\n\t\r\n\t\"\"\" Quantization \"\"\"\r\n\tnbits = 10\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/'+ network +'/'\r\n\tpath_save_q = path_save + network +'_layers_quant_'+ str(nbits) +'-bits_date.pickle'\r\n\t#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')\r\n\tlayers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+ network +'/'+ network +'.py')\r\n\t#bits_q = [nbits] * len(layers_sorted)\r\n\tbits_q = qbits\r\n\tpath_params = input_file\r\n\tquantize_network(path_params, layers_sorted, path_save_q, 
bits_q)\r\n\r\n\tprint('==================================================================')\r\n\tprint('INFERENCE')\r\n\tprint('==================================================================')\r\n\t\r\n\t\"\"\" Run Inference \"\"\"\r\n\t#path_save_q = path_save + '_layers_shift_quant_10May.pickle'\r\n\t#param_path = save_path_params\r\n\t#param_path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18_layers_shift_quant_retrain_A_10May.pickle'\r\n\tparam_path = path_save_q\r\n\twith tf.Graph().as_default():\r\n\t\tacc, netparams = eval_imagenet(network, param_path, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=0., n_epoch=1)\r\n\treturn acc\r\n\r\ndef run_inference(network, input_param_path, qbits):\r\n\r\n\tprint('==================================================================')\r\n\tprint('Quantization')\r\n\tprint('==================================================================')\r\n\t\r\n\t\"\"\" Quantization \"\"\"\r\n\tnbits = 10\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/'+ network +'/'\r\n\tpath_save_q = path_save + network +'train_1_test_retrained_quantized.pickle'\r\n\t#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')\r\n\tlayers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+ network +'/'+ network +'.py')\r\n\t#bits_q = [nbits] * len(layers_sorted)\r\n\tbits_q = qbits\r\n\tpath_params = input_param_path\r\n\tquantize_network(path_params, layers_sorted, path_save_q, bits_q)\r\n\r\n\tprint('==================================================================')\r\n\tprint('INFERENCE')\r\n\tprint('==================================================================')\r\n\t\r\n\t#param_path = input_param_path\r\n\tparam_path = path_save_q\r\n\tparam_q_path = ''\r\n\tlayer_index = 0\r\n\tlayer_name = 0\r\n\tfile_idx = 0\r\n\tshift_back = {}\r\n\twith tf.Graph().as_default():\r\n\t\tacc, netparams = eval_imagenet(network, param_path, param_q_path, qbits, layer_index, layer_name, file_idx, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=0., n_epoch=1)\r\n\treturn acc\r\n\r\n\r\ndef train_test_svhn_net(network_name, params, istrain, cost_factor, n_epoch, qbits, layer_index):\r\n\tckpt_path = CKPT_PATH + network_name + '/' + network_name + '.ckpt'\r\n\tckpt_path = '/home/ahmed/projects/SVHNClassifier/logs/train/latest.ckpt'\r\n\tprint('==================================================================')\r\n\tprint('Training')\r\n\tprint('==================================================================')\r\n\t\r\n\tparam_path = params\r\n\r\n\tif network_name == 'lenet':\r\n\t\twith tf.Graph().as_default():\r\n\t\t\tacc, netparams = eval_lenet(net_name=network_name, param_path=param_path, qbits=qbits, layer_index=layer_index, trainable=True, n_epoch=n_epoch)\r\n\telse:\r\n\t\treturn eval_imagenet(network_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)\r\n\r\n\t#save_path_params = 'lenet_retrained.pickle'\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name\r\n\tpath_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'\r\n\t\r\n\twith open(path_params_retrained, 'wb') as f:\r\n\t\tpickle.dump(netparams, f)\r\n\treturn acc\r\n\r\n\r\ndef 
train_test_lenet(network_name, params, istrain, cost_factor, n_epoch, qbits, layer_index):\r\n\tckpt_path = CKPT_PATH + network_name + '/' + network_name + '.ckpt'\r\n\terr_mean = [0.0, 0.0, 0.0, 0.0] #order: input, weights, biases, layers\r\n\terr_stddev = [0.0, 0.0, 0.0, 0.0]\r\n\ttrain_vars = [False, True, False, False]\r\n\t#istrain = True\r\n\r\n\t\"\"\"\r\n\tprint('==================================================================')\r\n\tprint('Training')\r\n\tprint('==================================================================')\r\n\t\r\n\t#param_path = params\r\n\tparam_path = ckpt_path\r\n\r\n\tif network_name == 'lenet':\r\n\t\twith tf.Graph().as_default():\r\n\t\t\tacc, netparams = eval_lenet(net_name=network_name, param_path=param_path, qbits=qbits, layer_index=layer_index, trainable=True, n_epoch=n_epoch)\r\n\telse:\r\n\t\treturn eval_imagenet(network_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)\r\n\r\n\t#save_path_params = 'lenet_retrained.pickle'\r\n\tpath_save = '../nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name\r\n\tpath_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'\r\n\t\r\n\twith open(path_params_retrained, 'wb') as f:\r\n\t\tpickle.dump(netparams, f)\r\n \r\n\tprint('==================================================================')\r\n\tprint('Quantization')\r\n\tprint('==================================================================')\r\n\t\r\n\tpath_save = '../nn_quant_and_run_code/results/quantized/'+ network_name +'/'\r\n\tpath_save_q = path_save + network_name +'train_1_test_retrained_quantized.pickle'\r\n\t#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')\r\n\tlayers_sorted = load.get_layers('../nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+ network_name +'/'+ network_name +'.py')\r\n\t#bits_q = [nbits] * len(layers_sorted)\r\n\tbits_q = qbits\r\n\tpath_params = path_params_retrained\r\n\tquantize_network(path_params, layers_sorted, path_save_q, bits_q)\r\n\t\"\"\"\r\n\r\n\tprint('==================================================================')\r\n\tprint('INFERENCE')\r\n\tprint('==================================================================')\r\n\t\r\n\t#param_path = input_param_path\r\n\t#param_path = path_save_q\r\n\tpath_pytorch_model = \"./pytorch_models/lenet_mnist.pickle\"\r\n\tacc_test, _ = eval_lenet(net_name=network_name, param_path=path_pytorch_model , qbits=qbits, layer_index=layer_index, trainable=False, n_epoch=1)\r\n\t#acc_test, _ = eval_lenet(net_name=network_name, param_path=path_save_q , qbits=qbits, layer_index=layer_index, trainable=False, n_epoch=1)\r\n\treturn 100, acc_test\r\n\t#return acc, acc_test\r\n\r\n\r\ndef write_to_csv(step_data):\r\n with open('train_1_acc.csv', 'a') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(step_data)\r\n\r\ndef write_to_data(step_data):\r\n with open('svhn_data.csv', 'a') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(step_data)\r\n\r\n\r\n# def main():\r\n\r\n# csv file initialization:\r\nheaders = ['acc']\r\nwith open('train_1_acc.csv', 'w') as writeFile:\r\n writer = csv.writer(writeFile)\r\n writer.writerow(headers)\r\n\r\nheaders = ['lambda_w', 'lambda_b', 'total_loss', 'acc_loss', 'loss_sin2_total', 'loss_bits_total', 'sin2_loss', 'sin2_loss_2', 
'nbits', 'nbits_2']\r\nwith open('svhn_data.csv', 'w') as writeFile:\r\n writer = csv.writer(writeFile)\r\n writer.writerow(headers)\r\n \r\n\r\ndef retrain(network_name, episode_num, layer_index, qbits):\r\n\t\"\"\"\r\n\t\t1- read initial model (or the one from previous iteration) --> PARAMS(1)\r\n\t\t2- From RL: \r\n\t\t\t- read layer index \r\n\t\t\t- read #bits\r\n\t\t3- quantize: starting from \"FP\" (@ start of each episode) ->- quantize --> PARAMS_q\r\n\t\t3''- quantize: starting from \"recent_retrained\" ->- quantize --> \"PARAMS_q\"\r\n\t\t4- calculate the quantization error of the input layer: ||(FP(layer) - PARAMS_q(layer))||^2\r\n\t\t4''- calculate the quantization error of the input layer: ||(recent_retrained(layer) - PARAMS_q(layer))||^2\r\n\t\t5- add this quantization error to the objective function \r\n\t\t6- initialize with PARAMS(1), fix previous layers (except 1st and last) and run retraining ... --> PARAMS_retrained\r\n\t\t7- (caching!!)\r\n\t\t* assume independent retraining for independent episodes \r\n\t\"\"\"\r\n\r\n\t\"\"\" \r\n\t- init_params = is the parameter file for retraining initialization\r\n\t- if starting the episode, then init_params comes from the full precision ckpt,\r\n\t\t\totherwise, it comes from the most recent retrained file \r\n\t\"\"\"\r\n\r\n\tglobal file_idx\r\n\tpath_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name\r\n\tpath_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'\r\n\t#if path_params_retrained.is_file():\r\n\r\n\t\"\"\" - init_params = is the parameter file for retraining initialization\r\n\t\t- if starting the episode, then init_params comes from the full precision ckpt,\r\n\t\t\totherwise, it comes from the most recent retrained file \"\"\"\r\n\t#if (episode_num==0) and (layer_index==1):\r\n\tif (layer_index==1):\r\n\t\tinit_params = './nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.ckpt'\r\n\t\t\"\"\" randomly pick 100k images to retrain on \"\"\"\r\n\t\t#file_idx = random.randint(1,13)\r\n\t\tfile_idx = 1\r\n\telse:\r\n\t\tinit_params = path_params_retrained\r\n\r\n\r\n\tif network_name=='lenet':\r\n\t\tacc = train_test_lenet(network_name='lenet', params=init_params, istrain=True, cost_factor=0, n_epoch=5, qbits=qbits, layer_index=layer_index)\r\n\t\treturn acc\r\n\r\n\telif network_name=='svhn_net':\r\n\t\tacc = train_test_svhn_net(network_name='lenet', params=init_params, istrain=True, cost_factor=0, n_epoch=5, qbits=qbits, layer_index=layer_index)\r\n\t\treturn acc\r\n\r\n\telse:\r\n\t\t\"\"\" accelerated fine-tuning \"\"\"\r\n\t\tlayers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.py')\r\n\t\tlayer_name = layers_sorted[layer_index]\r\n\r\n\t\tinit_params = './nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.ckpt'\r\n\t\tquantize_and_train(network_name, layer_index, layer_name, qbits, init_params, file_idx)\r\n\r\n\t\t\"\"\" validation accuracy after fine-tuning \"\"\"\r\n\t\tpath_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name\r\n\t\tpath_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'\r\n\t\tacc = run_inference(network_name, path_params_retrained, qbits)\r\n\t\treturn acc\r\n\r\n\r\n\"\"\" SVHN NET layers \"\"\"\r\n# 16 LAYERS: dict_keys(['digit1', 'digit2', 'digit3', 'digit4', 'digit5', 
'digit_length', 'hidden1', 'hidden10', 'hidden2', 'hidden3', 'hidden4', 'hidden5', 'hidden6', 'hidden7', 'hidden8', 'hidden9'])\r\n\r\nsvhn_num_layers = 16\r\nqbits = {}\r\n# keep Full Precision \r\nqbits['hidden1'] = 4 # layer 1\r\n\r\nqbits['hidden2'] = 4 # layer 2\r\nqbits['hidden3'] = 4 # layer 3\r\nqbits['hidden4'] = 4 # layer 4\r\nqbits['hidden5'] = 4 # layer 5\r\n\r\nqbits['hidden6'] = 4 # layer 6\r\nqbits['hidden7'] = 8 # layer 7\r\nqbits['hidden8'] = 8 # layer 8\r\nqbits['hidden9'] = 8 # layer 9\r\n\r\nqbits['hidden10'] = 16 # layer 10\r\n\r\n# keep Full Precision \r\nqbits['digit_length'] = 16 # layer 11\r\n\r\nqbits['digit1'] = 16 # layer 12\r\nqbits['digit2'] = 16 # layer 13\r\nqbits['digit3'] = 16 # layer 14\r\nqbits['digit4'] = 16 # layer 15\r\nqbits['digit5'] = 16 # layer 16\r\n\r\n#for key, _ in qbits.items():\r\n#\tqbits[key] = 8\r\n\r\nprint(qbits)\r\n#acc = eval_svhn_net(net_name='svhn_net', qbits_dict=qbits, trainable=False, n_epoch=100)\r\n#print(acc)\r\n\r\n#reg_strength = [1000, 400, 200, 100, 50, 30, 20, 10, 5, 1, 0]\r\nreg_strength = [1] \r\ndata = []\r\nfor lambda_q in reg_strength:\r\n\tacc = eval_svhn_net(net_name='svhn_net', qbits_dict=qbits, trainable=True, cost_factor=lambda_q, n_epoch=20000, init=0)\r\n\tdata.append((acc, lambda_q))\r\nprint(data)\r\nfor each in data: print(each)\r\n\r\n#acc = eval_svhn_net(net_name='svhn_net', qbits_dict=qbits, trainable=True, n_epoch=50, init=1)\r\n#print(acc)\r\n\r\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.get_variable", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.gfile.Exists", "tensorflow.cast", "pandas.DataFrame", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.TFRecordReader", "numpy.var", "tensorflow.train.batch", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.decode_raw", "numpy.save", "tensorflow.subtract", "tensorflow.train.exponential_decay", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.nn.in_top_k", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.as_string", "tensorflow.train.shuffle_batch", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.train.string_input_producer", "numpy.tanh", "numpy.sum", "tensorflow.sin", "tensorflow.constant", "tensorflow.local_variables_initializer", "tensorflow.train.start_queue_runners", "numpy.set_printoptions", "tensorflow.reshape", "tensorflow.random_crop", "tensorflow.image.convert_image_dtype" ] ]
Eduardof0nt/Lab-4-Visio-n
[ "803fb60973748148ca3c4da21e75c1d372744df2" ]
[ "Ejercicio-2/Ejercicio-2.py" ]
[ "import matplotlib\nimport matplotlib.pyplot as plt\n\nimport skimage.filters\nfrom skimage import io\nimport os\n\nimage1 = io.imread(os.getcwd() + '/Ejercicio-2/Img/ruido02.jpg', as_gray=False)\n\nfilteredImage1 = skimage.filters.gaussian(image1, multichannel=False, sigma=2)\n\nfig, ax = plt.subplots(1, 1)\nax.imshow(image1, cmap=plt.cm.gray)\nax.set_title('Original')\nax.axis('off')\n\nfig1, ax1 = plt.subplots(1, 1)\nax1.hist(image.ravel(), bins=256, histtype='step', color='black', density=True)\nax1.set_title('Histograma Original')\n\nfig2, ax2 = plt.subplots(1, 1)\nax2.imshow(filteredImage1, cmap=plt.cm.gray)\nax2.set_title('Filtrada')\nax2.axis('off')\n\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
Nikeliza/MaskTextSpotter
[ "d1a02d4c2d5111e7846c53227353a84f8588aaf5" ]
[ "maskrcnn_benchmark/modeling/backbone/mobilenet_v2.py" ]
[ "import torch.nn as nn\nfrom maskrcnn_benchmark.modeling.make_layers import inverted_residual_sequence, conv2d_bn_relu6\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, args):\n super(MobileNetV2, self).__init__()\n\n # s1, s2 = 2, 2\n # if args.DOWNSAMPLING == 16:\n # s1, s2 = 2, 1\n # elif args.DOWNSAMPLING == 8:\n # s1, s2 = 1, 1\n\n # Network is created here, then will be unpacked into nn.sequential\n self.network_settings = [{'t': -1, 'c': 32, 'n': 1, 's': 1},\n {'t': 1, 'c': 16, 'n': 1, 's': 1},\n {'t': 6, 'c': 24, 'n': 2, 's': 1},\n {'t': 6, 'c': 32, 'n': 3, 's': 2},\n {'t': 6, 'c': 64, 'n': 4, 's': 2},\n {'t': 6, 'c': 128, 'n': 3, 's': 2},\n {'t': 6, 'c': 256, 'n': 3, 's': 2},\n {'t': 6, 'c': 512, 'n': 1, 's': 2},\n {'t': None, 'c': 1280, 'n': 1, 's': 1}]\n self.num_classes = args.MODEL.ROI_BOX_HEAD.NUM_CLASSES - 1\n\n ###############################################################################################################\n\n # Feature Extraction part\n # Layer 0\n self.network = [\n conv2d_bn_relu6(args.NUM_CHANNELS,\n int(self.network_settings[0]['c'] * args.WIDTH_MULTIPLIER),\n args.KERNEL_SIZE,\n self.network_settings[0]['s'], args.DROUPOUT_PROB)]\n\n # Layers from 1 to 7\n for i in range(1, 8):\n self.network.extend(\n inverted_residual_sequence(\n int(self.network_settings[i - 1]['c'] * args.WIDTH_MULTIPLIER),\n int(self.network_settings[i]['c'] * args.WIDTH_MULTIPLIER),\n self.network_settings[i]['n'], self.network_settings[i]['t'],\n args.KERNEL_SIZE, self.network_settings[i]['s']))\n\n # Last layer before flattening\n # self.network.append(\n # conv2d_bn_relu6(int(self.network_settings[7]['c'] * args.WIDTH_MULTIPLIER),\n # int(self.network_settings[8]['c'] * args.WIDTH_MULTIPLIER), 1,\n # self.network_settings[8]['s'],\n # args.DROUPOUT_PROB))\n\n ###############################################################################################################\n\n # Classification part\n # self.network.append(nn.Dropout2d(args.DROUPOUT_PROB, inplace=True))\n # self.network.append(nn.AvgPool2d(\n # (args.IMG_HEIGHT // args.DOWNSAMPLING, args.IMG_WIDTH // args.DOWNSAMPLING)))\n # self.network.append(nn.Dropout2d(args.DROUPOUT_PROB, inplace=True))\n # self.network.append(\n # nn.Conv2d(int(self.network_settings[8]['c'] * args.WIDTH_MULTIPLIER), self.num_classes,\n # 1, bias=True))\n\n self.network = nn.Sequential(*self.network)\n\n self.initialize()\n\n # def forward(self, x):\n # # Debugging mode\n # # for op in self.network:\n # # x = op(x)\n # # print(x.shape)\n # x = self.network(x)\n # x = x.view(-1, self.num_classes)\n\n # return x\n\n def forward(self, x):\n outputs = []\n # x = self.network(x)\n for idx, stage_name in enumerate(self.network):\n x = stage_name(x)\n if idx in [10, 13, 16, 17]:\n outputs.append(x)\n return outputs\n\n def initialize(self):\n \"\"\"Initializes the model parameters\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal(m.weight)\n if m.bias is not None:\n nn.init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant(m.weight, 1)\n nn.init.constant(m.bias, 0)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.constant", "torch.nn.init.xavier_normal" ] ]
rio26/clustering
[ "8d4a3eb0648de448cc880aa3a79a6b764d67be7a" ]
[ "clustering/snmf-clustering.py" ]
[ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.gridspec as gridspec\nimport utils, SNMF\nimport numpy as np\nfrom sklearn.metrics.pairwise import chi2_kernel,sigmoid_kernel, laplacian_kernel,polynomial_kernel\n\n## ALL datasets: \n## 2 clusters: 'happy', 'be2', 'hm', 'hm2'\n## 3 clusters: 'sp', 'be3', 'dc3', 'ds3'\n## 4 clusters: 'g4', 'ds4'\n## 5 clusters: 'ds5'\n## 6 clusters: 'tar'\ndata_sets_2d = ['be2', 'hm', 'hm2', 'sp', 'happy', 'be3', 'dc3', 'ds3', 'g4', 'ds4', 'ds5', 'tar']\ndata_sets_hd = ['ch', 'hm', 'sp']\n\nplt.figure()\nplt.subplots_adjust(left=0.1, right=0.98, bottom=0.05, top=0.98, wspace=0.2,\n hspace=0.3)\n\nLABEL_COLOR_MAP = {0 : 'y',\n 1 : 'b',\n 2 : 'c',\n 3 : 'k',\n 4 : 'm',\n 5 : 'r'}\n\nn_cluster = 2\nflag = True\n\n### Data Pre-processing ###\nfor i in range(len(data_sets_2d)):\n# for i in range(2):\n\tif i == 3 or i == 8 or i == 10:\n\t\tn_cluster += 1 # manually increasing cluster number\n\tX0 = np.asmatrix(utils.load_dot_mat('data/DB.mat', 'DB/' + data_sets_2d[i]))\n\tmin_diff = 0 - X0.min() + 1\n\tif min_diff > 1:\n\t\tprint(\"Target matrix has negative element(s). Setting them positive... \\n\")\n\t\tfor row in range(X0.shape[0]):\n\t\t\tfor col in range(X0.shape[1]):\n\t\t\t\tX0[row,col] = X0[row,col] + min_diff\n\n\tinitial_gamma = 1/X0.shape[0]\n\t# kernel_X0 = utils.gaussian_kernel(X0, 3)\n\t# kernel_X0 = chi2_kernel(X0, gamma=5)\n\t# kernel_X0 = laplacian_kernel(X0, gamma=.3)\n\t# kernel_X0 = polynomial_kernel(X0, degree=2, gamma=.5, coef0=1)\n\tkernel_X0 = sigmoid_kernel(X0, gamma=0.1, coef0=0)\n\n\tprint(\"Running on dataset:\", i, \" with cluster number: \", n_cluster, \"...\")\n\tX = kernel_X0 * kernel_X0.T\n\t\n### Initialization ###\n\tinitial_h = np.asmatrix(np.random.rand(X.shape[0], n_cluster)) \n\t# initial_h = np.asmatrix(np.random.randint(0,X.max(),size=[X.shape[0], n_cluster]))\n\n### Compute NMF ###\n\tcluster = SNMF.SNMF(X, h_init = initial_h, r = n_cluster, max_iter =1000)\n\tprint(\"Staring error: \",cluster.frobenius_norm())\n\t# cluster_result = cluster.proj_solver()\n\t# cluster_result = cluster.proj_solver_bug()\n\tcluster_result = cluster.mur()\n\n\n\terror = cluster.get_error_trend()\n\t# plt.plot(error)\n\t# print(error)\n\tprint(\"Final error: \",cluster.frobenius_norm(), \"Task \", i, \" done. \\n\")\n\t# print(cluster_result[0,:])\n\ty_pred = np.zeros([X.shape[0]])\n\tfor row in range(len(y_pred)):\n\t\ty_pred[row] = np.argmax(cluster_result[row,:])\n\t\t# print(y_pred[row])\n\t# print(\"type & value:\", type(y_pred), y_pred.shape)\n\tlabel_color = [LABEL_COLOR_MAP[l] for l in y_pred]\n\n\tplt.subplot(3,4,i+1)\n\tplt.scatter([X0[:, 0]], [X0[:, 1]], c=label_color, s = 5)\n\nplt.tight_layout()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.scatter", "sklearn.metrics.pairwise.sigmoid_kernel", "matplotlib.pyplot.subplot", "numpy.argmax", "numpy.random.rand", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
rolando-archive/theusual-kaggle-seeclickfix-ensemble
[ "6879d4e5686443a3ad7d7b91062c51d7241e9125" ]
[ "Bryan/misc.py" ]
[ "\"\"\"\r\n=================================================================================================\r\nMisc code snippets used in the ipython console throughout the project for development and exploration,\r\nbut NOT directly referenced in the final program execution.\r\n=================================================================================================\r\n\"\"\"\r\n__author__ = 'Bryan Gregory'\r\n__email__ = '[email protected]'\r\n__date__ = '12-24-2013'\r\n\r\n#Internal modules\r\nimport train\r\nimport data_io\r\n\r\n#External modules\r\nfrom scipy import sparse\r\nfrom sklearn.externals import joblib\r\nimport sys\r\nimport csv\r\nimport json\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.datasets import dump_svmlight_file\r\nfrom sklearn import (metrics, cross_validation, linear_model, ensemble, tree, preprocessing, svm, neighbors, gaussian_process, naive_bayes, neural_network, pipeline, lda)\r\n\r\n################################################################################################\r\n#----------------------------------------------------------------------------#\r\n#----List of all current SKLearn learning algorithms capable of regression---#\r\n#----------------------------------------------------------------------------#\r\n\r\n#----For larger data sets------#\r\n#clf = linear_model.LogisticRegression(penalty='l2', dual=True, tol=0.0001, C=1, fit_intercept=True, intercept_scaling=1.0, class_weight=None, random_state=None);clf_name='log'\r\n#clf = linear_model.SGDRegressor(alpha=0.001, n_iter=800,shuffle=True); clf_name='SGD_001_800'\r\n#clf = linear_model.Ridge();clf_name = 'RidgeReg'\r\n#clf = linear_model.LinearRegression();clf_name = 'LinReg'\r\n#clf = linear_model.ElasticNet()\r\n#clf = linear_model.Lasso();clf_name = 'Lasso'\r\n#clf = linear_model.LassoCV(cv=3);clf_name = 'LassoCV'\r\n#clf = svm.SVR(kernel = 'poly',cache_size = 16000.0) #use .ravel(), kernel='rbf','linear','poly','sigmoid'\r\n#clf = svm.NuSVR(nu=0.5, C=1.0, kernel='linear', degree=3, gamma=0.0, coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=20000, verbose=False, max_iter=-1)\r\n\r\n#----For smaller data sets------# (Do not work or have very long training times on large sparse datasets) Require .todense()\r\n#clf = ensemble.RandomForestRegressor(n_estimators=50); clfname='RFReg_50'\r\n#clf = ensemble.ExtraTreesRegressor(n_estimators=30) #n_jobs = -1 if running in a main() loop\r\n#clf = ensemble.GradientBoostingRegressor(n_estimators=700, learning_rate=.1, max_depth=1, random_state=888, loss='ls');clf_name='GBM'\r\nclf = ensemble.AdaBoostRegressor(base_estimator=tree.DecisionTreeRegressor(compute_importances=None, criterion='mse', max_depth=3,\r\n max_features=None, min_density=None, min_samples_leaf=1,\r\n min_samples_split=2, random_state=None, splitter='best'),\r\n n_estimators=150, learning_rate=.5, loss='linear', random_state=None)\r\n#clf = gaussian_process.GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=100)\r\n#clf = neighbors.KNeighborsRegressor(100, weights='uniform', algorithm = 'auto');clf_name='KNN_200'\r\n\r\n\r\n################################################################################################\r\n#---Different methods of cross validation---#\r\n#May require mtxTrn.toarray()\r\ncv_preds = train.cross_validate(hstack([sparse.csr_matrix(dfTrn.urlid.values).transpose(),mtxTrn]),mtxTrnTarget.ravel(),\r\n 
folds=10,SEED=42,test_size=.1,clf=clf,clf_name=clf_name,pred_fg=True)\r\ntrain.cross_validate(mtxTrn,mtxTrnTarget.ravel(),folds=8,SEED=888,test_size=.1,clf=clf,clf_name=clf_name,pred_fg=False)\r\ntrain.cross_validate_temporal(mtxTrn,mtxTest,mtxTrnTarget.ravel(),mtxTestTarget.ravel(),clf=clf,\r\n clf_name=clf_name,pred_fg=False)\r\ntrain.cross_validate_using_benchmark('global_mean',dfTrn, mtxTrn,mtxTrnTarget,folds=20)\r\n\r\n\r\n################################################################################################\r\n#---Calculate the degree of variance between ground truth and the mean of the CV predictions.----#\r\n#---Returns a list of all training records with their average variance---#\r\ntrain.calc_cv_preds_var(dfTrn,cv_preds)\r\n\r\n\r\n################################################################################################\r\n#--Use estimator for manual predictions--#\r\ndfTest, clf = train.predict(mtxTrn,mtxTrnTarget.ravel(),mtxTest,dfTest,clf,clf_name) #may require mtxTest.toarray()\r\ndfTest, clf = train.predict(mtxTrn.todense(),mtxTrnTarget.ravel(),mtxTest.todense(),dfTest,clf,clf_name) #may require mtxTest.toarray()\r\n\r\n################################################################################################\r\n#--Save feature matrices in svm format for external modeling--#\r\ny_trn = np.asarray(dfTrn.num_votes)\r\ny_test = np.ones(mtxTest.shape[0], dtype = int )\r\ndump_svmlight_file(mtxTrn, y_trn, f = 'Data/Votes_trn.svm', zero_based = False )\r\ndump_svmlight_file(mtxTest, y_test, f = 'Data/Votes_test.svm', zero_based = False )\r\n\r\n################################################################################################\r\n#--Save a model to joblib file--#\r\ndata_io.save_cached_object(clf,'rf_500_TextAll')\r\n\r\n#--Load a model from joblib file--#\r\ndata_io.load_cached_object('Models/040513--rf_500_TextAll.joblib.pk1')\r\n\r\n################################################################################################\r\n#--Save text feature names list for later reference--#\r\ndata_io.save_text_features('Data/text_url_features.txt',tfidf_vec.get_feature_names())\r\n" ]
[ [ "sklearn.datasets.dump_svmlight_file", "sklearn.tree.DecisionTreeRegressor", "numpy.asarray", "scipy.sparse.csr_matrix", "numpy.ones" ] ]
ttngu207/Li-2015a
[ "a7a4f78aadc8574284d702bf3474b661bcdd7ef1" ]
[ "pipeline/plot/unit_characteristic_plot.py" ]
[ "import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport itertools\nimport pandas as pd\n\nfrom pipeline import experiment, ephys, psth\nfrom pipeline.plot.util import (_plot_with_sem, _extract_one_stim_dur, _get_units_hemisphere, _get_clustering_method,\n _plot_stacked_psth_diff, _plot_avg_psth,\n _get_photostim_time_and_duration, _get_trial_event_times,\n _jointplot_w_hue)\n\nm_scale = 1200\n_plt_xmin = -3\n_plt_xmax = 2\n\n\ndef plot_clustering_quality(probe_insertion):\n probe_insertion = probe_insertion.proj()\n amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat\n * ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch(\n 'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')\n\n metrics = {'amp': amp,\n 'snr': snr,\n 'isi': np.array(isi_violation) * 100, # to percentage\n 'rate': np.array(spk_rate)}\n label_mapper = {'amp': 'Amplitude',\n 'snr': 'Signal to noise ratio (SNR)',\n 'isi': 'ISI violation (%)',\n 'rate': 'Firing rate (spike/s)'}\n\n fig, axs = plt.subplots(2, 3, figsize=(12, 8))\n fig.subplots_adjust(wspace=0.4)\n\n for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):\n ax.plot(metrics[m1], metrics[m2], '.k')\n ax.set_xlabel(label_mapper[m1])\n ax.set_ylabel(label_mapper[m2])\n\n # cosmetic\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n\ndef plot_unit_characteristic(probe_insertion, axs=None):\n probe_insertion = probe_insertion.proj()\n amp, snr, spk_rate, x, y, insertion_depth = (\n ephys.Unit * ephys.ProbeInsertion.InsertionLocation * ephys.UnitStat\n & probe_insertion & 'unit_quality != \"all\"').fetch(\n 'unit_amp', 'unit_snr', 'avg_firing_rate', 'unit_posx', 'unit_posy', 'dv_location')\n\n insertion_depth = np.where(np.isnan(insertion_depth), 0, insertion_depth)\n\n metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(), x, y + insertion_depth))))\n metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']\n\n if axs is None:\n fig, axs = plt.subplots(1, 3, figsize=(10, 8))\n fig.subplots_adjust(wspace=0.6)\n\n assert axs.size == 3\n\n cosmetic = {'legend': None,\n 'linewidth': 1.75,\n 'alpha': 0.9,\n 'facecolor': 'none', 'edgecolor': 'k'}\n\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)\n\n # cosmetic\n for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_title(title)\n ax.set_xlim((-10, 60))\n\n\ndef plot_unit_selectivity(probe_insertion, axs=None):\n probe_insertion = probe_insertion.proj()\n attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',\n 'ipsi_firing_rate', 'unit_posx', 'unit_posy', 'dv_location']\n selective_units = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation\n * experiment.EventPeriod & probe_insertion & 'period_selectivity != \"non-selective\"').fetch(*attr_names)\n selective_units = pd.DataFrame(selective_units).T\n selective_units.columns = attr_names\n selective_units.period_selectivity.astype('category')\n\n # --- account for insertion depth (manipulator depth)\n selective_units.unit_posy = (selective_units.unit_posy\n + 
np.where(np.isnan(selective_units.dv_location.values.astype(float)),\n 0, selective_units.dv_location.values.astype(float)))\n\n # --- get ipsi vs. contra firing rate difference\n f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)\n selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()\n\n # --- prepare for plotting\n cosmetic = {'legend': None,\n 'linewidth': 0.0001}\n ymax = selective_units.unit_posy.max() + 100\n\n # a bit of hack to get 'open circle'\n pts = np.linspace(0, np.pi * 2, 24)\n circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]\n vert = np.r_[circ, circ[::-1] * .7]\n\n open_circle = mpl.path.Path(vert)\n\n # --- plot\n if axs is None:\n fig, axs = plt.subplots(1, 3, figsize=(10, 8))\n fig.subplots_adjust(wspace=0.6)\n\n assert axs.size == 3\n\n for (title, df), ax in zip(((p, selective_units[selective_units.period == p])\n for p in ('sample', 'delay', 'response')), axs):\n sns.scatterplot(data=df, x='unit_posx', y='unit_posy',\n s=df.f_rate_diff.values.astype(float)*m_scale,\n hue='period_selectivity', marker=open_circle,\n palette={'contra-selective': 'b', 'ipsi-selective': 'r'},\n ax=ax, **cosmetic)\n contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100\n # cosmetic\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_title(f'{title}\\n% contra: {contra_p:.2f}\\n% ipsi: {100-contra_p:.2f}')\n ax.set_xlim((-10, 60))\n # ax.set_ylim((0, ymax))\n\n\ndef plot_unit_bilateral_photostim_effect(probe_insertion, clustering_method=None, axs=None):\n probe_insertion = probe_insertion.proj()\n\n if clustering_method is None:\n try:\n clustering_method = _get_clustering_method(probe_insertion)\n except ValueError as e:\n raise ValueError(str(e) + '\\nPlease specify one with the kwarg \"clustering_method\"')\n\n dv_loc = (ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch1('dv_location')\n\n no_stim_cond = (psth.TrialCondition\n & {'trial_condition_name':\n 'all_noearlylick_bilateral_alm_nostim'}).fetch1('KEY')\n\n bi_stim_cond = (psth.TrialCondition\n & {'trial_condition_name':\n 'all_noearlylick_bilateral_alm_stim'}).fetch1('KEY')\n\n # get photostim duration\n stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent\n * psth.TrialCondition().get_trials('all_noearlylick_bilateral_alm_stim')\n & probe_insertion).fetch('duration'))\n stim_dur = _extract_one_stim_dur(stim_durs)\n\n units = ephys.Unit & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != \"all\"'\n\n metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])\n\n _, cue_onset = _get_trial_event_times(['delay'], units, 'all_noearlylick_bilateral_alm_nostim')\n cue_onset = cue_onset[0]\n\n # XXX: could be done with 1x fetch+join\n for u_idx, unit in enumerate(units.fetch('KEY', order_by='unit')):\n\n x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')\n\n # obtain unit psth per trial, for all nostim and bistim trials\n nostim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(no_stim_cond['trial_condition_name'])\n bistim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(bi_stim_cond['trial_condition_name'])\n\n nostim_psths, nostim_edge = psth.compute_unit_psth(unit, nostim_trials.fetch('KEY'), per_trial=True)\n bistim_psths, bistim_edge = psth.compute_unit_psth(unit, bistim_trials.fetch('KEY'), per_trial=True)\n\n # compute the firing rate difference between contra vs. 
ipsi within the stimulation duration\n ctrl_frate = np.array([nostim_psth[np.logical_and(nostim_edge >= cue_onset,\n nostim_edge <= cue_onset + stim_dur)].mean()\n for nostim_psth in nostim_psths])\n stim_frate = np.array([bistim_psth[np.logical_and(bistim_edge >= cue_onset,\n bistim_edge <= cue_onset + stim_dur)].mean()\n for bistim_psth in bistim_psths])\n\n frate_change = (stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()\n frate_change = abs(frate_change) if frate_change < 0 else 0.0001\n\n metrics.loc[u_idx] = (int(unit['unit']), x, y - dv_loc, frate_change)\n\n metrics.frate_change = metrics.frate_change / metrics.frate_change.max()\n\n fig = None\n if axs is None:\n fig, axs = plt.subplots(1, 1, figsize=(4, 8))\n\n cosmetic = {'legend': None,\n 'linewidth': 1.75,\n 'alpha': 0.9,\n 'facecolor': 'none', 'edgecolor': 'k'}\n\n sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,\n ax=axs, **cosmetic)\n\n axs.spines['right'].set_visible(False)\n axs.spines['top'].set_visible(False)\n axs.set_title('% change')\n axs.set_xlim((-10, 60))\n\n return fig\n\n\ndef plot_stacked_contra_ipsi_psth(units, axs=None):\n units = units.proj()\n\n if axs is None:\n fig, axs = plt.subplots(1, 2, figsize=(20, 20))\n assert axs.size == 2\n\n trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)\n\n hemi = _get_units_hemisphere(units)\n\n conds_i = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')\n\n conds_c = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')\n\n sel_i = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"ipsi-selective\"' & units)\n\n sel_c = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"contra-selective\"' & units)\n\n # ipsi selective ipsi trials\n psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')\n # ipsi selective contra trials\n psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')\n # contra selective contra trials\n psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')\n # contra selective ipsi trials\n psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')\n\n _plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0],\n vlines=period_starts, flip=True)\n _plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1],\n vlines=period_starts)\n\n # cosmetic\n for ax, title in zip(axs, ('Contra-selective Units', 'Ipsi-selective Units')):\n ax.set_title(title)\n ax.set_ylabel('Unit')\n ax.set_xlabel('Time to go-cue (s)')\n ax.set_xlim([_plt_xmin, _plt_xmax])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n\ndef plot_selectivity_sorted_stacked_contra_ipsi_psth(units, axs=None):\n units = units.proj()\n\n if axs is None:\n fig, axs = plt.subplots(1, 2, figsize=(20, 20))\n assert axs.size == 2\n\n trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)\n\n hemi = _get_units_hemisphere(units)\n\n conds_i = (psth.TrialCondition\n & {'trial_condition_name':\n 
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')\n\n conds_c = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')\n\n # ---- separate units to:\n # i) sample or delay not response:\n sample_delay_units = units & (psth.PeriodSelectivity\n & 'period in (\"sample\", \"delay\")'\n & 'period_selectivity != \"non-selective\"')\n sample_delay_units = sample_delay_units & (psth.PeriodSelectivity & units\n & 'period = \"response\"'\n & 'period_selectivity = \"non-selective\"')\n # ii) sample or delay and response:\n sample_delay_response_units = units & (psth.PeriodSelectivity\n & 'period in (\"sample\", \"delay\")'\n & 'period_selectivity != \"non-selective\"')\n sample_delay_response_units = sample_delay_response_units & (psth.PeriodSelectivity & units\n & 'period = \"response\"'\n & 'period_selectivity != \"non-selective\"')\n # iii) not sample nor delay and response:\n response_units = (units & (psth.PeriodSelectivity & 'period in (\"sample\")'\n & 'period_selectivity = \"non-selective\"')\n & (psth.PeriodSelectivity & 'period in (\"delay\")'\n & 'period_selectivity = \"non-selective\"'))\n response_units = response_units & (psth.PeriodSelectivity & units\n & 'period = \"response\"'\n & 'period_selectivity != \"non-selective\"')\n\n ipsi_selective_psth, contra_selective_psth = [], []\n for units in (sample_delay_units, sample_delay_response_units, response_units):\n sel_i = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"ipsi-selective\"' & units)\n sel_c = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"contra-selective\"' & units)\n\n # ipsi selective ipsi trials\n psth_is_it = (psth.UnitPsth * sel_i & conds_i).fetch()\n # ipsi selective contra trials\n psth_is_ct = (psth.UnitPsth * sel_i & conds_c).fetch()\n # contra selective contra trials\n psth_cs_ct = (psth.UnitPsth * sel_c & conds_c).fetch()\n # contra selective ipsi trials\n psth_cs_it = (psth.UnitPsth * sel_c & conds_i).fetch()\n\n contra_selective_psth.append(_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], flip=True, plot=False))\n ipsi_selective_psth.append(_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], plot=False))\n\n contra_boundaries = np.cumsum([len(k) for k in contra_selective_psth[::-1]])\n ipsi_boundaries = np.cumsum([len(k) for k in ipsi_selective_psth[::-1]])\n\n contra_selective_psth = np.vstack(contra_selective_psth)\n ipsi_selective_psth = np.vstack(ipsi_selective_psth)\n\n xlim = -3, 2\n im = axs[0].imshow(contra_selective_psth, cmap=plt.cm.bwr,\n aspect=4.5/contra_selective_psth.shape[0],\n extent=[-3, 3, 0, contra_selective_psth.shape[0]])\n im.set_clim((-1, 1))\n\n im = axs[1].imshow(ipsi_selective_psth, cmap=plt.cm.bwr,\n aspect=4.5/ipsi_selective_psth.shape[0],\n extent=[-3, 3, 0, ipsi_selective_psth.shape[0]])\n im.set_clim((-1, 1))\n\n # cosmetic\n for ax, title, hspans in zip(axs, ('Contra-selective Units', 'Ipsi-selective Units'),\n (contra_boundaries, ipsi_boundaries)):\n for x in period_starts:\n ax.axvline(x=x, linestyle='--', color='k')\n ax.set_title(title)\n ax.set_ylabel('Unit')\n ax.set_xlabel('Time to go-cue (s)')\n ax.set_xlim(xlim)\n for ystart, ystop, color in zip([0]+list(hspans[:-1]), hspans, ('k', 'grey', 'w')):\n ax.axhspan(ystart, ystop, 0.98, 1, alpha=1, color=color)\n\n\ndef plot_avg_contra_ipsi_psth(units, axs=None):\n units = units.proj()\n\n if axs is None:\n fig, axs = plt.subplots(1, 2, 
figsize=(16, 6))\n assert axs.size == 2\n\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')\n\n hemi = _get_units_hemisphere(units)\n\n good_unit = ephys.Unit & 'unit_quality != \"all\"'\n\n conds_i = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')\n\n conds_c = (psth.TrialCondition\n & {'trial_condition_name':\n 'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')\n\n sel_i = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"ipsi-selective\"' & units)\n\n sel_c = (ephys.Unit * psth.UnitSelectivity\n & 'unit_selectivity = \"contra-selective\"' & units)\n\n psth_is_it = (((psth.UnitPsth & conds_i)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_i.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n psth_is_ct = (((psth.UnitPsth & conds_c)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_i.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n psth_cs_ct = (((psth.UnitPsth & conds_c)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_c.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n psth_cs_it = (((psth.UnitPsth & conds_i)\n * ephys.Unit.proj('unit_posy'))\n & good_unit.proj() & sel_c.proj()).fetch(\n 'unit_psth', order_by='unit_posy desc')\n\n _plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],\n 'Contra-selective')\n _plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],\n 'Ipsi-selective')\n\n ymax = max([ax.get_ylim()[1] for ax in axs])\n for ax in axs:\n ax.set_ylim((0, ymax))\n ax.set_xlim([_plt_xmin, _plt_xmax])\n\n\ndef plot_psth_photostim_effect(units, condition_name_kw=['bilateral_alm'], axs=None):\n \"\"\"\n For the specified `units`, plot PSTH comparison between stim vs. 
no-stim with left/right trial instruction\n The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: bilateral ALM)\n \"\"\"\n units = units.proj()\n\n if axs is None:\n fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n assert axs.size == 2\n\n hemi = _get_units_hemisphere(units)\n\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')\n\n # no photostim:\n psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]\n psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]\n\n psth_n_l = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')\n psth_n_r = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')\n\n psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]\n psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]\n\n psth_s_l = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')\n psth_s_r = (psth.UnitPsth * psth.TrialCondition & units\n & {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')\n\n # get photostim duration and stim time (relative to go-cue)\n stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]\n stim_time, stim_dur = _get_photostim_time_and_duration(units,\n psth.TrialCondition().get_trials(stim_trial_cond_name))\n\n if hemi == 'left':\n psth_s_i = psth_s_l\n psth_n_i = psth_n_l\n psth_s_c = psth_s_r\n psth_n_c = psth_n_r\n else:\n psth_s_i = psth_s_r\n psth_n_i = psth_n_r\n psth_s_c = psth_s_l\n psth_n_c = psth_n_l\n\n _plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],\n 'Control')\n _plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],\n 'Photostim')\n\n # cosmetic\n ymax = max([ax.get_ylim()[1] for ax in axs])\n for ax in axs:\n ax.set_ylim((0, ymax))\n ax.set_xlim([_plt_xmin, _plt_xmax])\n\n # add shaded bar for photostim\n axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')\n\n\ndef plot_selectivity_change_photostim_effect(units, condition_name_kw, recover_time_window=None, ax=None):\n \"\"\"\n For each unit in the specified units, extract:\n + control, left-instruct PSTH (ctrl_left)\n + control, right-instruct PSTH (ctrl_right)\n + stim, left-instruct PSTH (stim_left)\n + stim, right-instruct PSTH (stim_right)\n Then, control_PSTH and stim_PSTH is defined as\n (ctrl_left - ctrl_right) for ipsi-selective unit that locates on the left-hemisphere, and vice versa\n (stim_left - stim_right) for ipsi-selective unit that locates on the left-hemisphere, and vice versa\n Selectivity change is then defined as: control_PSTH - stim_PSTH\n \"\"\"\n trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)\n\n stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]\n stim_time, stim_dur = _get_photostim_time_and_duration(units,\n psth.TrialCondition().get_trials(stim_trial_cond_name))\n\n ctrl_left_cond_name = 'all_noearlylick_nostim_left'\n ctrl_right_cond_name = 'all_noearlylick_nostim_right'\n stim_left_cond_name = 
psth.TrialCondition().get_cond_name_from_keywords(condition_name_kw\n + ['noearlylick', 'stim', 'left'])[0]\n stim_right_cond_name = psth.TrialCondition().get_cond_name_from_keywords(condition_name_kw\n + ['noearlylick', 'stim', 'right'])[0]\n\n delta_sels, ctrl_psths = [], []\n for unit in (units * psth.UnitSelectivity & 'unit_selectivity != \"non-selective\"').proj('unit_selectivity').fetch(as_dict=True):\n # ---- trial count criteria ----\n # no less than 5 trials for control\n if (len(psth.TrialCondition.get_trials(ctrl_left_cond_name) & unit) < 5\n or len(psth.TrialCondition.get_trials(ctrl_right_cond_name) & unit) < 5):\n continue\n # no less than 2 trials for stimulation\n if (len(psth.TrialCondition.get_trials(stim_left_cond_name) & unit) < 2\n or len(psth.TrialCondition.get_trials(stim_right_cond_name) & unit) < 2):\n continue\n\n hemi = _get_units_hemisphere(unit)\n\n ctrl_left_psth, t_vec = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': ctrl_left_cond_name})['psth']\n ctrl_right_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': ctrl_right_cond_name})['psth']\n try:\n stim_left_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': stim_left_cond_name})['psth']\n stim_right_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': stim_right_cond_name})['psth']\n except:\n continue\n\n if unit['unit_selectivity'] == 'ipsi-selective':\n ctrl_psth_diff = ctrl_left_psth - ctrl_right_psth if hemi == 'left' else ctrl_right_psth - ctrl_left_psth\n stim_psth_diff = stim_left_psth - stim_right_psth if hemi == 'left' else stim_right_psth - stim_left_psth\n elif unit['unit_selectivity'] == 'contra-selective':\n ctrl_psth_diff = ctrl_left_psth - ctrl_right_psth if hemi == 'right' else ctrl_right_psth - ctrl_left_psth\n stim_psth_diff = stim_left_psth - stim_right_psth if hemi == 'right' else stim_right_psth - stim_left_psth\n\n ctrl_psths.append(ctrl_psth_diff)\n delta_sels.append(ctrl_psth_diff - stim_psth_diff)\n\n ctrl_psths = np.vstack(ctrl_psths)\n delta_sels = np.vstack(delta_sels)\n\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(4, 6))\n\n _plot_with_sem(delta_sels, t_vec, ax)\n\n if recover_time_window:\n recovery_times = []\n for i in range(1000):\n i_sample = np.random.choice(delta_sels.shape[0], delta_sels.shape[0], replace = True)\n btstrp_diff = np.nanmean(delta_sels[i_sample, :], axis = 0) / np.nanmean(ctrl_psths[i_sample, :], axis = 0)\n t_recovered = t_vec[\n (btstrp_diff < 0.2) & (t_vec > recover_time_window[0]) & (t_vec < recover_time_window[1])]\n if len(t_recovered) > 0:\n recovery_times.append(t_recovered[0])\n ax.axvline(x = np.mean(recovery_times), linestyle = '--', color = 'g')\n ax.axvspan(np.mean(recovery_times) - np.std(recovery_times), np.mean(recovery_times) + np.std(recovery_times),\n alpha = 0.2, color = 'g')\n\n ax.axhline(y=0, color = 'k')\n for x in period_starts:\n ax.axvline(x=x, linestyle = '--', color = 'k')\n # add shaded bar for photostim\n ax.axvspan(stim_time, stim_time + stim_dur, 0.95, 1, alpha = 0.3, color = 'royalblue')\n ax.set_ylabel('Selectivity change (spike/s)')\n ax.set_xlabel('Time (s)')\n\n\ndef plot_coding_direction(units, time_period=None, axs=None):\n _, proj_contra_trial, proj_ipsi_trial, time_stamps = psth.compute_CD_projected_psth(\n units.fetch('KEY'), time_period=time_period)\n\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick')\n\n if axs is None:\n fig, axs = plt.subplots(1, 1, figsize=(8, 6))\n\n # 
plot\n _plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')\n _plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')\n\n for x in period_starts:\n axs.axvline(x=x, linestyle = '--', color = 'k')\n # cosmetic\n axs.spines['right'].set_visible(False)\n axs.spines['top'].set_visible(False)\n axs.set_ylabel('CD projection (a.u.)')\n axs.set_xlabel('Time (s)')\n\n\ndef plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):\n \"\"\"\n Plot trial-to-trial CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)\n Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period\n \"\"\"\n _, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps = psth.compute_CD_projected_psth(\n unit_g1.fetch('KEY'), time_period=time_period)\n _, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps = psth.compute_CD_projected_psth(\n unit_g2.fetch('KEY'), time_period=time_period)\n\n period_starts = _get_trial_event_times(['sample', 'delay', 'go'], unit_g1, 'good_noearlylick')\n\n if labels:\n assert len(labels) == 2\n else:\n labels = ('unit group 1', 'unit group 2')\n\n # plot projected trial-psth\n fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n\n _plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')\n _plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')\n _plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')\n _plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')\n\n # cosmetic\n for ax, label in zip(axs, labels):\n for x in period_starts:\n ax.axvline(x=x, linestyle = '--', color = 'k')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('CD projection (a.u.)')\n ax.set_xlabel('Time (s)')\n ax.set_title(label)\n\n # plot trial CD-endpoint correlation\n p_start, p_end = time_period\n contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)\n\n c_df = pd.DataFrame([contra_cdend_1, contra_cdend_2]).T\n c_df.columns = labels\n c_df['trial-type'] = 'contra'\n i_df = pd.DataFrame([ipsi_cdend_1, ipsi_cdend_2]).T\n i_df.columns = labels\n i_df['trial-type'] = 'ipsi'\n df = c_df.append(i_df)\n\n jplot = _jointplot_w_hue(data=df, x=labels[0], y=labels[1], hue= 'trial-type', colormap=['b', 'r'],\n figsize=(8, 6), fig=None, scatter_kws=None)\n jplot['fig'].show()\n\n" ]
[ [ "numpy.abs", "numpy.linspace", "numpy.random.choice", "numpy.isnan", "numpy.logical_and", "matplotlib.path.Path", "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.sin", "numpy.cos", "numpy.std", "numpy.mean", "numpy.nanmean", "numpy.array", "numpy.vstack" ] ]
weiwang1206/indigo
[ "463d89b09699a57bfdfbae351646df6a60040b90" ]
[ "env/sender.py" ]
[ "# Copyright 2018 Francis Y. Yan, Jestin Ma\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport time\nimport sys\nimport json\nimport socket\nimport select\nfrom os import path\nimport numpy as np\nimport datagram_pb2\nimport project_root\nfrom helpers.helpers import (\n curr_ts_ms, apply_op,\n READ_FLAGS, ERR_FLAGS, READ_ERR_FLAGS, WRITE_FLAGS, ALL_FLAGS)\n\n\ndef format_actions(action_list):\n \"\"\" Returns the action list, initially a list with elements \"[op][val]\"\n like /2.0, -3.0, +1.0, formatted as a dictionary.\n\n The dictionary keys are the unique indices (to retrieve the action) and\n the values are lists ['op', val], such as ['+', '2.0'].\n \"\"\"\n return {idx: [action[0], float(action[1:])]\n for idx, action in enumerate(action_list)}\n\n\nclass Sender(object):\n # RL exposed class/static variables\n max_steps = 1000\n state_dim = 4\n action_mapping = format_actions([\"/2.0\", \"-10.0\", \"+0.0\", \"+10.0\", \"*2.0\"])\n action_cnt = len(action_mapping)\n\n def __init__(self, port=0, train=False, debug=False):\n self.train = train\n self.debug = debug\n\n # UDP socket and poller\n self.peer_addr = None\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind(('0.0.0.0', port))\n sys.stderr.write('[sender] Listening on port %s\\n' %\n self.sock.getsockname()[1])\n\n self.poller = select.poll()\n self.poller.register(self.sock, ALL_FLAGS)\n\n self.dummy_payload = 'x' * 1400\n\n if self.debug:\n self.sampling_file = open(path.join(project_root.DIR, 'env', 'sampling_time'), 'w', 0)\n\n # congestion control related\n self.seq_num = 0\n self.next_ack = 0\n self.cwnd = 10.0\n self.step_len_ms = 10\n\n # state variables for RLCC\n self.delivered_time = 0\n self.delivered = 0\n self.sent_bytes = 0\n\n self.min_rtt = float('inf')\n self.delay_ewma = None\n self.send_rate_ewma = None\n self.delivery_rate_ewma = None\n\n self.step_start_ms = None\n self.running = True\n\n if self.train:\n self.step_cnt = 0\n\n self.ts_first = None\n self.rtt_buf = []\n\n def cleanup(self):\n if self.debug and self.sampling_file:\n self.sampling_file.close()\n self.sock.close()\n\n def handshake(self):\n \"\"\"Handshake with peer receiver. Must be called before run().\"\"\"\n\n while True:\n msg, addr = self.sock.recvfrom(1600)\n\n if msg == 'Hello from receiver' and self.peer_addr is None:\n self.peer_addr = addr\n self.sock.sendto('Hello from sender', self.peer_addr)\n sys.stderr.write('[sender] Handshake success! '\n 'Receiver\\'s address is %s:%s\\n' % addr)\n break\n\n self.sock.setblocking(0) # non-blocking UDP socket\n\n def set_sample_action(self, sample_action):\n \"\"\"Set the policy. 
Must be called before run().\"\"\"\n\n self.sample_action = sample_action\n\n def update_state(self, ack):\n \"\"\" Update the state variables listed in __init__() \"\"\"\n self.next_ack = max(self.next_ack, ack.seq_num + 1)\n curr_time_ms = curr_ts_ms()\n\n # Update RTT\n rtt = float(curr_time_ms - ack.send_ts)\n self.min_rtt = min(self.min_rtt, rtt)\n\n if self.train:\n if self.ts_first is None:\n self.ts_first = curr_time_ms\n self.rtt_buf.append(rtt)\n\n delay = rtt - self.min_rtt\n if self.delay_ewma is None:\n self.delay_ewma = delay\n else:\n self.delay_ewma = 0.875 * self.delay_ewma + 0.125 * delay\n\n # Update BBR's delivery rate\n self.delivered += ack.ack_bytes\n self.delivered_time = curr_time_ms\n delivery_rate = (0.008 * (self.delivered - ack.delivered) /\n max(1, self.delivered_time - ack.delivered_time))\n\n if self.delivery_rate_ewma is None:\n self.delivery_rate_ewma = delivery_rate\n else:\n self.delivery_rate_ewma = (\n 0.875 * self.delivery_rate_ewma + 0.125 * delivery_rate)\n\n # Update Vegas sending rate\n send_rate = 0.008 * (self.sent_bytes - ack.sent_bytes) / max(1, rtt)\n\n if self.send_rate_ewma is None:\n self.send_rate_ewma = send_rate\n else:\n self.send_rate_ewma = (\n 0.875 * self.send_rate_ewma + 0.125 * send_rate)\n\n def take_action(self, action_idx):\n old_cwnd = self.cwnd\n op, val = self.action_mapping[action_idx]\n\n self.cwnd = apply_op(op, self.cwnd, val)\n self.cwnd = max(2.0, self.cwnd)\n\n def window_is_open(self):\n return self.seq_num - self.next_ack < self.cwnd\n\n def send(self):\n data = datagram_pb2.Data()\n data.seq_num = self.seq_num\n data.send_ts = curr_ts_ms()\n data.sent_bytes = self.sent_bytes\n data.delivered_time = self.delivered_time\n data.delivered = self.delivered\n data.payload = self.dummy_payload\n\n serialized_data = data.SerializeToString()\n self.sock.sendto(serialized_data, self.peer_addr)\n\n self.seq_num += 1\n self.sent_bytes += len(serialized_data)\n\n def recv(self):\n serialized_ack, addr = self.sock.recvfrom(1600)\n\n if addr != self.peer_addr:\n return\n\n ack = datagram_pb2.Ack()\n ack.ParseFromString(serialized_ack)\n\n self.update_state(ack)\n\n if self.step_start_ms is None:\n self.step_start_ms = curr_ts_ms()\n\n # At each step end, feed the state:\n if curr_ts_ms() - self.step_start_ms > self.step_len_ms: # step's end\n state = [self.delay_ewma,\n self.delivery_rate_ewma,\n self.send_rate_ewma,\n self.cwnd]\n\n # time how long it takes to get an action from the NN\n if self.debug:\n start_sample = time.time()\n\n action = self.sample_action(state)\n\n if self.debug:\n self.sampling_file.write('%.2f ms\\n' % ((time.time() - start_sample) * 1000))\n\n self.take_action(action)\n\n self.delay_ewma = None\n self.delivery_rate_ewma = None\n self.send_rate_ewma = None\n\n self.step_start_ms = curr_ts_ms()\n\n if self.train:\n self.step_cnt += 1\n if self.step_cnt >= Sender.max_steps:\n self.step_cnt = 0\n self.running = False\n\n self.compute_performance()\n\n def run(self):\n TIMEOUT = 1000 # ms\n\n self.poller.modify(self.sock, ALL_FLAGS)\n curr_flags = ALL_FLAGS\n\n while self.running:\n if self.window_is_open():\n if curr_flags != ALL_FLAGS:\n self.poller.modify(self.sock, ALL_FLAGS)\n curr_flags = ALL_FLAGS\n else:\n if curr_flags != READ_ERR_FLAGS:\n self.poller.modify(self.sock, READ_ERR_FLAGS)\n curr_flags = READ_ERR_FLAGS\n\n events = self.poller.poll(TIMEOUT)\n\n if not events: # timed out\n self.send()\n\n for fd, flag in events:\n assert self.sock.fileno() == fd\n\n if flag & ERR_FLAGS:\n 
sys.exit('Error occurred to the channel')\n\n if flag & READ_FLAGS:\n self.recv()\n\n if flag & WRITE_FLAGS:\n if self.window_is_open():\n self.send()\n\n def compute_performance(self):\n duration = curr_ts_ms() - self.ts_first\n tput = 0.008 * self.delivered / duration\n perc_delay = np.percentile(self.rtt_buf, 95)\n\n with open(path.join(project_root.DIR, 'env', 'perf'), 'a', 0) as perf:\n perf.write('%.2f %d\\n' % (tput, perc_delay))\n" ]
[ [ "numpy.percentile" ] ]
PuneethKouloorkar/SPC-Water-Model
[ "faa18fe344dee47eb8eb8e939bd15c5876af12e4" ]
[ "plot_traj.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.genfromtxt('trajectory', usecols = [1])\ny = np.genfromtxt('trajectory', usecols = [2])\n\n#plotting only the oxygen particle's trajectory\nox = x[::3]\noy = y[::3]\n\nfor i in range(len(ox)-1):\n plt.scatter(ox[i],oy[i],c='k',marker = 'o')\n plt.pause(0.001)\n\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.pause", "matplotlib.pyplot.scatter", "numpy.genfromtxt" ] ]
jpeg729/pytorch-bits
[ "5d107094042c27472dfb7dee77506b603f5d3e45" ]
[ "models.py" ]
[ "\nfrom torch import nn\nimport nn as custom\n\n\nclass Model(nn.Module):\n def __init__(self, input_size=1, layers=[\"LSTM_51\"], output_size=1, sigmoid=None, tanh=None, biases=True):\n super(Model, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.layers = []\n prev_size = input_size\n for l, spec in enumerate(layers):\n bits = spec.split(\"_\")\n cell_type = bits.pop(0)\n print(spec, cell_type, bits)\n\n if hasattr(custom, cell_type):\n layer = getattr(custom, cell_type)\n elif hasattr(nn, cell_type):\n layer = getattr(nn, cell_type)\n else:\n raise Exception(\"Unrecognised layer type \" + cell_type)\n\n layer_args = {}\n if \"input_size\" in layer.__init__.__code__.co_varnames:\n layer_args[\"input_size\"] = prev_size\n if \"hidden_size\" in layer.__init__.__code__.co_varnames:\n layer_args[\"hidden_size\"] = int(bits.pop(0))\n prev_size = layer_args[\"hidden_size\"]\n \n for a in bits:\n print(a)\n k, v = a.split(\"=\")\n k = k.replace(\"-\", \"_\")\n if k not in layer.__init__.__code__.co_varnames:\n print(\"kwarg\", k, \"for\", cell_type, \"not recognised\")\n continue\n for t in (int, float):\n try:\n v = t(v)\n break\n except ValueError:\n pass\n layer_args[k] = v\n\n if \"tanh\" in layer.__init__.__code__.co_varnames:\n layer_args[\"tanh\"] = tanh\n if \"sigmoid\" in layer.__init__.__code__.co_varnames:\n layer_args[\"sigmoid\"] = sigmoid\n if \"bias\" in layer.__init__.__code__.co_varnames:\n layer_args[\"bias\"] = biases\n\n print(\"Adding layer of type\", spec, \":\", layer_args)\n layer = layer(**layer_args,)\n self.layers.append(layer)\n self.add_module(\"layer\"+str(l), layer)\n\n if prev_size != output_size:\n print(\"Adding linear layer :\", prev_size, \"->\", output_size)\n layer = nn.Linear(prev_size, output_size)\n self.layers.append(layer)\n self.add_module(\"layer\"+str(l+1), layer)\n\n def reset_hidden(self):\n for layer in self.layers:\n if hasattr(layer, \"reset_hidden\"):\n layer.reset_hidden()\n # for module in self.modules():\n # if module is not self and hasattr(module, \"reset_hidden\"):\n # module.reset_hidden()\n \n def detach_hidden(self):\n for layer in self.layers:\n if hasattr(layer, \"detach_hidden\"):\n layer.detach_hidden()\n\n def forward(self, data, future=0):\n for layer in self.layers:\n data = layer(data)\n return data\n" ]
[ [ "torch.nn.Linear" ] ]
KIPAC/NWayMatch
[ "3cc4dfd8816dfb373a3f1174801549a05a2baf52" ]
[ "python/nway/nway.py" ]
[ "\"\"\" Proof of concept for n-way matching using footprint detection\n\nSome terminology:\n\ncell : A small area, used to find matches, The size\n should be about the same as the maximum match radius.\n\nsource : As per DM, per-catalog detection\n\nmatchWcs : The WCS used to define the cells\n\nsubRegion : A square sub-region of the skymap defined by the matchWcs\n\nsourceCountsMap : A map of the number of source per cell, made per\n subRegion\n\nCluster: A set of sources found by running a footprint finding algorithm\n on a sourceCountsMap\n\n\"\"\"\n\nimport sys\nimport os\nimport glob\n\nfrom collections import OrderedDict\n\nimport time\nimport numpy as np\nfrom astropy import wcs\nfrom astropy.table import Table\nfrom astropy.table import vstack\nfrom astropy.io import fits\n\ntry:\n import pyarrow.parquet as pq # noqa\nexcept ImportError:\n print(\"nway requires pyarrrow\")\n\ntry:\n import lsst.afw.detection as afwDetect\n import lsst.afw.image as afwImage\nexcept ImportError:\n print(\"nway requires lsst.afw\") \n \nRECURSE_MAX = 200\nCOLUMNS = ['ra', 'decl', 'visit', 'ccd', 'sky_source', 'sourceId', 'PsFlux', 'PsFluxErr', 'Centroid_flag', 'detect_isPrimary']\n\ndef createGlobalWcs(refDir, cellSize, nCell):\n \"\"\" Helper function to create the WCS used to project the\n sources in a skymap \"\"\"\n w = wcs.WCS(naxis=2)\n w.wcs.cdelt = [-cellSize, cellSize]\n w.wcs.crpix = [nCell[0]/2, nCell[1]/2]\n w.wcs.crval = [refDir[0], refDir[1]]\n return w\n\ndef clusterStats(clusterDict):\n \"\"\" Helper function to get stats about the clusters\n\n 'Orphan' means single source clusters (i.e., single detections)\n 'Mixed` means there is more that one source from at least one\n input catalog\n 'Confused' means there are more than four cases of duplication\n \"\"\"\n nOrphan = 0\n nMixed = 0\n nConfused = 0\n for val in clusterDict.values():\n if val.nSrc == 1:\n nOrphan += 1\n if val.nSrc != val.nUnique:\n nMixed += 1\n if val.nSrc > val.nUnique + 3:\n nConfused += 1\n return np.array([len(clusterDict), nOrphan, nMixed, nConfused])\n\n\nclass ClusterData:\n \"\"\" Class to store data about clusters\n\n Parameters\n ----------\n iCluster : `int`\n Cluster ID\n origCluster : `int`\n Id of the original cluster this cluster was made from\n nSrc : `int`\n Number of sources in this cluster\n nUnique : `int`\n Number of catalogs contributing sources to this cluster\n catIndices : `np.array`, [`int`]\n Indices of the catalogs of sources associated to this cluster\n sourcdIds : `np.array`, [`int`]\n Sources IDs of the sources associated to this cluster\n sourcdIdxs : `np.array`, [`int`]\n Indices of the sources with their respective catalogs\n xCent : `float`\n X-pixel value of cluster centroid (in WCS used to do matching)\n yCent : `float`\n Y-pixel value of cluster centroid (in WCS used to do matching)\n \"\"\"\n def __init__(self, iCluster, footprint, sources, origCluster=None):\n self._iCluster = iCluster\n self._footprint = footprint\n if origCluster is None:\n self._origCluster = self._iCluster\n else:\n self._origCluster = origCluster\n self._catIndices = sources[0]\n self._sourceIds = sources[1]\n self._sourceIdxs = sources[2]\n self._nSrc = self._catIndices.size\n self._nUnique = len(np.unique(self._catIndices))\n self._objects = []\n self._xCent = None\n self._yCent = None\n self._dist2 = None\n self._rmsDist = None\n self.xCell = None\n self.yCell = None\n self.snr = None\n\n def extract(self, subRegionData):\n \"\"\" Extract the xCell, yCell and snr data from\n the sources in this 
cluster\n \"\"\"\n self.xCell = np.zeros((self._nSrc), np.float32)\n self.yCell = np.zeros((self._nSrc), np.float32)\n self.snr = np.zeros((self._nSrc), np.float32)\n for i, (iCat, srcIdx) in enumerate(zip(self._catIndices, self._sourceIdxs)):\n self.xCell[i] = subRegionData.data[iCat]['xcell'].values[srcIdx]\n self.yCell[i] = subRegionData.data[iCat]['ycell'].values[srcIdx]\n self.snr[i] = subRegionData.data[iCat]['SNR'].values[srcIdx]\n\n def clearTempData(self):\n \"\"\" Remove temporary data only used when making objects \"\"\"\n self.xCell = None\n self.yCell = None\n self.snr = None\n\n @property\n def iCluster(self):\n \"\"\" Return the cluster ID \"\"\"\n return self._iCluster\n\n @property\n def nSrc(self):\n \"\"\" Return the number of sources associated to the cluster \"\"\"\n return self._nSrc\n\n @property\n def nUnique(self):\n \"\"\" Return the number of catalogs contributing sources to the cluster \"\"\"\n return self._nUnique\n\n @property\n def sourceIds(self):\n \"\"\" Return the source IDs associated to this cluster \"\"\"\n return self._sourceIds\n\n @property\n def dist2(self):\n \"\"\" Return an array with the distance squared (in cells)\n between each source and the cluster centroid \"\"\"\n return self._dist2\n\n @property\n def objects(self):\n \"\"\" Return the objects associated with this cluster \"\"\"\n return self._objects\n\n def processCluster(self, subRegionData, pixelR2Cut):\n \"\"\" Function that is called recursively to\n split clusters until they:\n\n 1. Consist only of sources with the match radius of the cluster\n centroid.\n\n 2. Have at most one source per input catalog\n \"\"\"\n self._nSrc = self._catIndices.size\n self._nUnique = len(np.unique(self._catIndices))\n if self._nSrc == 0:\n print(\"Empty cluster\", self._nSrc, self._nUnique)\n return self._objects\n self.extract(subRegionData)\n if self._nSrc == 1:\n self._xCent = self.xCell[0]\n self._yCent = self.yCell[0]\n self._dist2 = np.zeros((1))\n self._rmsDist = 0.\n initialObject = self.addObject(subRegionData)\n initialObject.processObject(subRegionData, pixelR2Cut)\n self.clearTempData()\n return self._objects\n\n sumSnr = np.sum(self.snr)\n self._xCent = np.sum(self.xCell*self.snr) / sumSnr\n self._yCent = np.sum(self.yCell*self.snr) / sumSnr\n self._dist2 = (self._xCent - self.xCell)**2 + (self._yCent - self.yCell)**2\n self._rmsDist = np.sqrt(np.mean(self._dist2))\n \n initialObject = self.addObject(subRegionData)\n initialObject.processObject(subRegionData, pixelR2Cut)\n self.clearTempData()\n return self._objects\n\n def addObject(self, subRegionData, mask=None):\n \"\"\" Add a new object to this cluster \"\"\"\n newObject = subRegionData.addObject(self, mask)\n self._objects.append(newObject)\n return newObject\n\n\nclass ObjectData:\n \"\"\" Small class to define 'Objects', i.e., sets of associated sources \"\"\"\n\n def __init__(self, cluster, objectId, mask):\n \"\"\" Build from `ClusterData`, an objectId and mask specifying with sources\n in the cluster are part of the object \"\"\"\n self._parentCluster = cluster\n self._objectId = objectId\n if mask is None:\n self._mask = np.ones((self._parentCluster.nSrc), dtype=bool)\n else:\n self._mask = mask\n self._catIndices = self._parentCluster._catIndices[self._mask]\n self._nSrc = self._catIndices.size\n self._nUnique = np.unique(self._catIndices).size\n self._xCent = None\n self._yCent = None\n self._dist2 = None\n self._rmsDist = None\n\n @property\n def nSrc(self):\n \"\"\" Return the number of sources associated to the cluster 
\"\"\"\n return self._nSrc\n\n @property\n def nUnique(self):\n \"\"\" Return the number of catalogs contributing sources to the cluster \"\"\"\n return self._nUnique\n\n @property\n def dist2(self):\n \"\"\" Return an array with the distance squared (in cells)\n between each source and the cluster centroid \"\"\"\n return self._dist2\n\n def updateCatIndices(self):\n self._catIndices = self._parentCluster._catIndices[self._mask]\n self._nSrc = self._catIndices.size\n self._nUnique = np.unique(self._catIndices).size\n\n def sourceIds(self):\n return self._parentCluster.sourceIds[self._mask]\n \n def processObject(self, subRegionData, pixelR2Cut, recurse=0):\n \"\"\" Recursively process an object and make sub-objects \"\"\"\n if recurse > RECURSE_MAX:\n print(\"Recursion limit: \", self._nSrc, self._nUnique)\n return\n if self._nSrc == 0:\n print(\"Empty object\", self._nSrc, self._nUnique, recurse)\n return\n\n xCell = self._parentCluster.xCell[self._mask]\n yCell = self._parentCluster.yCell[self._mask]\n snr = self._parentCluster.snr[self._mask]\n\n if self._mask.sum() == 1:\n self._xCent = xCell[0]\n self._yCent = yCell[0]\n self._dist2 = np.zeros((1), float)\n self._rmsDist = 0.\n return\n\n sumSnr = np.sum(snr)\n self._xCent = np.sum(xCell*snr) / sumSnr\n self._yCent = np.sum(yCell*snr) / sumSnr\n self._dist2 = np.array((self._xCent - xCell)**2 + (self._yCent - yCell)**2)\n self._rmsDist = np.sqrt(np.mean(self._dist2))\n subMask = self._dist2 < pixelR2Cut\n if subMask.all():\n if self._nSrc != self._nUnique:\n self.splitObject(subRegionData, pixelR2Cut, recurse=recurse+1)\n return\n\n if not subMask.any():\n idx = np.argmax(snr)\n self._xCent = xCell[idx]\n self._yCent = yCell[idx]\n self._dist2 = np.array((self._xCent - xCell)**2 + (self._yCent - yCell)**2)\n self._rmsDist = np.sqrt(np.mean(self._dist2))\n subMask = self._dist2 < pixelR2Cut\n\n newObjMask = self._mask.copy()\n newObjMask[newObjMask] *= subMask\n\n newObject = self._parentCluster.addObject(subRegionData, newObjMask)\n newObject.processObject(subRegionData, pixelR2Cut)\n\n self._mask[self._mask] *= ~subMask\n self.updateCatIndices()\n self.processObject(subRegionData, pixelR2Cut, recurse=recurse+1)\n\n\n def splitObject(self, subRegionData, pixelR2Cut, recurse=0):\n \"\"\" Split up a cluster keeping only one source per input\n catalog, choosing the one closest to the cluster center \"\"\"\n sortIdx = np.argsort(self._dist2)\n mask = np.ones((self._nSrc), dtype=bool)\n usedCats = {}\n for iSrc, catIdx in zip(sortIdx, self._catIndices[sortIdx]):\n if catIdx not in usedCats:\n usedCats[catIdx] = 1\n continue\n else:\n usedCats[catIdx] += 1\n mask[iSrc] = False\n\n newObjMask = self._mask.copy()\n newObjMask[newObjMask] *= mask\n\n newObject = self._parentCluster.addObject(subRegionData, newObjMask)\n newObject.processObject(subRegionData, pixelR2Cut)\n\n self._mask[self._mask] *= ~mask\n self.updateCatIndices() \n self.processObject(subRegionData, pixelR2Cut, recurse=recurse+1)\n\n\nclass SubregionData:\n \"\"\" Class to analyze data for a SubRegion\n\n Include sub-region boundries, reduced data tables\n and clustering results\n\n Does not store sky maps\n\n Subregions are square sub-regions of the Skymap\n constructed with the WCS\n\n The subregion covers corner:corner+size\n\n The sources are projected into an array that extends `buf` cells\n beyond the region.\n\n Parameters\n ----------\n _data : `list`, [`Dataframe`]\n Reduced dataframes with only sources for this sub-region\n\n _clusterIds : `list`, [`np.array`]\n 
Matched arrays with the index of the cluster associated to each\n source. I.e., these could added to the Dataframes as\n additional columns\n\n _clusterDict : `dict`, [`int` : `ClusterData`]\n Dictionary with cluster membership data\n\n TODO: Add code to filter out clusters centered in the buffer\n \"\"\"\n def __init__(self, matcher, idOffset, corner, size, buf=10):\n self._matcher = matcher\n self._idOffset = idOffset # Offset used for the Object and Cluster IDs for this region\n self._corner = corner # cellX, cellY for corner of region\n self._size = size # size of region\n self._buf = buf\n self._minCell = corner - buf\n self._maxCell = corner + size + buf\n self._nCells = self._maxCell - self._minCell\n self._data = None\n self._nSrc = None\n self._footprintIds = None\n self._clusterDict = OrderedDict()\n self._objectDict = OrderedDict()\n\n def reduceData(self, data):\n \"\"\" Pull out only the data needed for this sub-region \"\"\"\n self._data = [self.reduceDataframe(val) for val in data]\n self._nSrc = sum([len(df) for df in self._data])\n \n @property\n def nClusters(self):\n \"\"\" Return the number of clusters in this region \"\"\"\n return len(self._clusterDict)\n\n @property\n def nObjects(self):\n \"\"\" Return the number of objects in this region \"\"\"\n return len(self._objectDict)\n\n @property\n def data(self):\n \"\"\" Return the data associated to this region \"\"\"\n return self._data\n\n @property\n def clusterDist(self):\n \"\"\" Return a dictionary mapping clusters Ids to clusters \"\"\"\n return self._clusterDict\n\n def reduceDataframe(self, dataframe):\n \"\"\" Filters dataframe to keep only source in the subregion \"\"\"\n xLocal = dataframe['xcell'] - self._minCell[0]\n yLocal = dataframe['ycell'] - self._minCell[1]\n filtered = (xLocal >= 0) & (xLocal < self._nCells[0]) & (yLocal >= 0) & (yLocal < self._nCells[1])\n red = dataframe[filtered].copy(deep=True)\n red['xlocal'] = xLocal[filtered]\n red['ylocal'] = yLocal[filtered]\n return red\n\n def countsMap(self, weightName=None):\n \"\"\" Fill a map that counts the number of source per cell \"\"\"\n toFill = np.zeros((self._nCells))\n for df in self._data:\n toFill += self.fillSubRegionFromDf(df, weightName=weightName)\n return toFill\n\n def associateSourcesToFootprints(self, clusterKey):\n \"\"\" Loop through data and associate sources to clusters \"\"\"\n self._footprintIds = [self.findClusterIds(df, clusterKey) for df in self._data]\n\n def buildClusterData(self, fpSet, pixelR2Cut=4.):\n \"\"\" Loop through cluster ids and collect sources into\n the ClusterData objects \"\"\"\n footprints = fpSet.getFootprints()\n footprintDict = {}\n nMissing = 0\n nFound = 0\n for iCat, (df, footprintIds) in enumerate(zip(self._data, self._footprintIds)):\n for srcIdx, (srcId, footprintId) in enumerate(zip(df['sourceId'], footprintIds)):\n if footprintId < 0:\n nMissing += 1\n continue\n if footprintId not in footprintDict:\n footprintDict[footprintId] = [(iCat, srcId, srcIdx)]\n else:\n footprintDict[footprintId].append((iCat, srcId, srcIdx))\n nFound += 1\n for footprintId, sources in footprintDict.items():\n footprint = footprints[footprintId]\n iCluster = footprintId+self._idOffset\n cluster = ClusterData(iCluster, footprint, np.array(sources).T)\n self._clusterDict[iCluster] = cluster\n cluster.processCluster(self, pixelR2Cut)\n\n def analyze(self, weightName=None, pixelR2Cut=4.):\n \"\"\" Analyze this sub-region\n\n Note that this returns the counts maps and clustering info,\n which can be helpful for 
debugging.\n \"\"\"\n if self._nSrc == 0:\n return None\n countsMap = self.countsMap(weightName)\n oDict = self.getFootprints(countsMap)\n oDict['countsMap'] = countsMap\n self.associateSourcesToFootprints(oDict['footprintKey'])\n self.buildClusterData(oDict['footprints'], pixelR2Cut)\n return oDict\n\n @staticmethod\n def findClusterIds(df, clusterKey):\n \"\"\" Associate sources to clusters using `clusterkey`\n which is a map where any pixel associated to a cluster\n has the cluster index as its value \"\"\"\n return np.array([clusterKey[yLocal,xLocal] for xLocal, yLocal in zip(df['xlocal'], df['ylocal'])]).astype(np.int32)\n\n def fillSubRegionFromDf(self, df, weightName=None):\n \"\"\" Fill a source counts map from a reduced dataframe for one input\n catalog \"\"\"\n if weightName is None:\n weights = None\n else:\n weights = df[weightName].values\n hist = np.histogram2d(df['xlocal'], df['ylocal'], bins=self._nCells,\n range=((0, self._nCells[0]),\n (0, self._nCells[1])),\n weights=weights)\n return hist[0]\n\n @staticmethod\n def filterFootprints(fpSet, buf):\n \"\"\" Remove footprints within `buf` cells of the region edge \"\"\"\n region = fpSet.getRegion()\n width, height = region.getWidth(), region.getHeight()\n outList = []\n maxX = width - buf\n maxY = height - buf\n for fp in fpSet.getFootprints():\n cent = fp.getCentroid()\n xC = cent.getX()\n yC = cent.getY()\n if xC < buf or xC > maxX or yC < buf or yC > maxY:\n continue\n outList.append(fp)\n fpSetOut = afwDetect.FootprintSet(fpSet.getRegion())\n fpSetOut.setFootprints(outList)\n return fpSetOut\n\n def getFootprints(self, countsMap):\n \"\"\" Take a source counts map and do clustering using Footprint detection\n \"\"\"\n image = afwImage.ImageF(countsMap.astype(np.float32))\n footprintsOrig = afwDetect.FootprintSet(image, afwDetect.Threshold(0.5))\n footprints = self.filterFootprints(footprintsOrig, self._buf)\n footprintKey = afwImage.ImageI(np.full(countsMap.shape, -1, dtype=np.int32))\n for i, footprint in enumerate(footprints.getFootprints()):\n footprint.spans.setImage(footprintKey, i, doClip=True)\n return dict(image=image, footprints=footprints, footprintKey=footprintKey)\n\n def getClusterAssociations(self):\n \"\"\" Convert the clusters to a set of associations \"\"\"\n clusterIds = []\n sourceIds = []\n distances = []\n for cluster in self._clusterDict.values():\n clusterIds.append(np.full((cluster.nSrc), cluster.iCluster, dtype=int))\n sourceIds.append(cluster.sourceIds)\n distances.append(cluster.dist2)\n if not distances:\n return Table(dict(distance=[], id=np.array([], int), object=np.array([], int)))\n distances = np.hstack(distances)\n distances = self._matcher.cellToArcsec() * np.sqrt(distances)\n data = dict(object=np.hstack(clusterIds),\n id=np.hstack(sourceIds),\n distance=distances)\n return Table(data)\n\n def getObjectAssociations(self):\n clusterIds = []\n objectIds = []\n sourceIds = []\n distances = []\n for obj in self._objectDict.values():\n clusterIds.append(np.full((obj._nSrc), obj._parentCluster.iCluster, dtype=int))\n objectIds.append(np.full((obj._nSrc), obj._objectId, dtype=int))\n sourceIds.append(obj.sourceIds())\n distances.append(obj.dist2)\n if not distances:\n return Table(dict(object=np.array([], int),\n parent=np.array([], int),\n id=np.array([], int),\n distance=[]))\n distances = np.hstack(distances)\n distances = self._matcher.cellToArcsec() * np.sqrt(distances) \n data = dict(object=np.hstack(objectIds),\n parent=np.hstack(clusterIds),\n id=np.hstack(sourceIds),\n 
distance=distances)\n return Table(data)\n\n def getClusterStats(self):\n \"\"\" Convert the clusters to a set of associations \"\"\"\n nClust = self.nClusters\n clusterIds = np.zeros((nClust), dtype=int)\n nSrcs = np.zeros((nClust), dtype=int)\n nObjects = np.zeros((nClust), dtype=int)\n nUniques = np.zeros((nClust), dtype=int)\n distRms = np.zeros((nClust), dtype=float)\n xCents = np.zeros((nClust), dtype=float)\n yCents = np.zeros((nClust), dtype=float)\n for idx, cluster in enumerate(self._clusterDict.values()):\n clusterIds[idx] = cluster._iCluster\n nSrcs[idx] = cluster.nSrc\n nObjects[idx] = len(cluster._objects)\n nUniques[idx] = cluster.nUnique\n distRms[idx] = cluster._rmsDist\n xCents[idx] = cluster._xCent\n yCents[idx] = cluster._yCent\n ra, decl = self._matcher.cellToWorld(xCents, yCents)\n distRms *= self._matcher.cellToArcsec()\n\n data = dict(clusterIds=clusterIds,\n nSrcs=nSrcs,\n nObject=nObjects,\n nUnique=nUniques,\n distRms=distRms,\n ra=ra,\n decl=decl)\n\n return Table(data)\n\n def getObjectStats(self):\n \"\"\" Convert the clusters to a set of associations \"\"\"\n nObj = self.nObjects\n clusterIds = np.zeros((nObj), dtype=int)\n objectIds = np.zeros((nObj), dtype=int)\n nSrcs = np.zeros((nObj), dtype=int)\n distRms = np.zeros((nObj), dtype=float)\n xCents = np.zeros((nObj), dtype=float)\n yCents = np.zeros((nObj), dtype=float)\n for idx, obj in enumerate(self._objectDict.values()):\n clusterIds[idx] = obj._parentCluster._iCluster\n objectIds[idx] = obj._objectId\n nSrcs[idx] = obj.nSrc\n distRms[idx] = obj._rmsDist\n xCents[idx] = obj._xCent\n yCents[idx] = obj._yCent\n\n ra, decl = self._matcher.cellToWorld(xCents, yCents)\n distRms *= self._matcher.cellToArcsec()\n \n data = dict(clusterIds=clusterIds,\n objectIds=objectIds,\n nSrcs=nSrcs,\n distRms=distRms,\n ra=ra,\n decl=decl)\n\n return Table(data)\n\n def addObject(self, cluster, mask=None):\n \"\"\" Add an object to this sub-region \"\"\"\n objectId = self.nObjects + self._idOffset\n newObject = ObjectData(cluster, objectId, mask)\n self._objectDict[objectId] = newObject\n return newObject\n\n\nclass NWayMatch:\n \"\"\" Class to do N-way matching\n\n Uses a provided WCS to define a Skymap that covers the full region\n begin matched.\n\n Uses that WCS to assign cell locations to all sources in the input catalogs\n\n Iterates over sub-regions and does source clustering in each sub-region\n using Footprint detection on a Skymap of source counts per cell.\n\n Assigns each input source to a cluster.\n\n At that stage the clusters are not the final product as they can include\n more than one soruce from a given catalog.\n\n Loops over clusters and processes each cluster to:\n\n 1. Remove outliers outside the match radius w.r.t. the cluster centroid.\n 2. 
Resolve cases of confusion, where multiple sources from a single\n catalog contribute to a cluster.\n\n Parameters\n ----------\n _redData : `list`, [`Dataframe`]\n Reduced dataframes with only the columns needed for matching\n\n _clusters : `OrderedDict`, [`tuple`, `SubregionData`]\n Dictionary providing access to subregion data\n \"\"\"\n\n def __init__(self, matchWcs, **kwargs):\n self._wcs = matchWcs\n self._cellSize = self._wcs.wcs.cdelt[1]\n self._nCellSide = np.ceil(2*np.array(self._wcs.wcs.crpix)).astype(int)\n self._subRegionSize = kwargs.get('subRegionSize', 3000)\n self._subRegionBuffer = kwargs.get('subRegionBuffer', 10)\n self._subregionMaxObject = kwargs.get('subregionMaxObject', 100000)\n self._pixelR2Cut = kwargs.get('pixelR2Cut', 1.0)\n self._nSubRegion = np.ceil(self._nCellSide/self._subRegionSize)\n self._redData = OrderedDict()\n self._clusters = None\n\n def cellToArcsec(self):\n return 3600. * self._cellSize\n\n def cellToWorld(self, xCell, yCell):\n return self._wcs.wcs_pix2world(xCell, yCell, 0)\n \n @classmethod\n def create(cls, refDir, regionSize, cellSize, **kwargs):\n \"\"\" Make an `NWayMatch` object from inputs \"\"\"\n nCell = (np.array(regionSize)/cellSize).astype(int)\n matchWcs = createGlobalWcs(refDir, cellSize, nCell)\n return cls(matchWcs, **kwargs)\n\n @property\n def redData(self):\n \"\"\" Return the dictionary of reduced data, i.e., just the columns\n need for matching \"\"\"\n return self._redData\n\n @property\n def nSubRegion(self):\n \"\"\" Return the number of sub-regions in X,Y \"\"\"\n return self._nSubRegion\n\n def reduceData(self, inputFiles, visitIds):\n \"\"\" Read input files and filter out only the columns we need \"\"\"\n for fName, vid in zip(inputFiles, visitIds):\n self._redData[vid] = self.reduceDataFrame(fName)\n\n def reduceDataFrame(self, fName):\n \"\"\" Read and reduce a single input file \"\"\"\n parq = pq.read_pandas(fName, columns=COLUMNS)\n df = parq.to_pandas()\n df['SNR'] = df['PsFlux']/df['PsFluxErr']\n # select sources that have SNR > 5.\n # You may start with 10 or even 50 if you want to start with just the brightest objects\n # AND\n # Centroid_flag is True if there was a problem fitting the position (centroid)\n # AND\n # sky_source is True if it is a measurement of blank sky.\n # sky_sources should have SNR < 5 or the Centroid_flag set,\n # but explicitly filter just to make sure.\n # AND\n # detect_isPrimary = True to remove duplicate rows from deblending:\n # If a source has been deblended, the parent is marked detect_isPrimary=False and its children True.\n df_clean = df[(df.SNR > 5) & ~df.Centroid_flag & ~df.sky_source & df.detect_isPrimary]\n xcell, ycell = self._wcs.wcs_world2pix(df_clean['ra'].values, df_clean['decl'].values, 0)\n df_red = df_clean[[\"ra\", \"decl\", \"SNR\", \"sourceId\"]].copy(deep=True)\n df_red['xcell'] = xcell\n df_red['ycell'] = ycell\n return df_red[[\"ra\", \"decl\", \"SNR\", \"sourceId\", \"xcell\", \"ycell\"]]\n\n def reduceCatalog(self, catalog):\n \"\"\" Reduce a catalog \"\"\"\n raise NotImplementedError()\n\n def add(self, catalog, vid):\n \"\"\" Add a catalog to the data set being matched \"\"\"\n self._redData[vid] = self.reduceCatalog(catalog)\n\n def getIdOffset(self, ix, iy):\n \"\"\" Get the ID offset to use for a given sub-region \"\"\"\n subRegionIdx = self._nSubRegion[1]*ix + iy\n return int(self._subregionMaxObject * subRegionIdx)\n\n def analyzeSubregion(self, ix, iy, fullData=False):\n \"\"\" Analyze a single subregion\n\n Returns an OrderedDict\n\n 'srd' : 
`SubregionData`\n The analysis data for the sub-region\n\n if fullData is True the return dict will include\n\n 'image' : `afwImage.ImageI`\n Image of subregion source counts map\n 'countsMap' : `np.array`\n Numpy array with same\n 'clusters' : `afwDetect.FootprintSet`\n Clusters as dectected by finding FootprintSet on source counts map\n 'clusterKey' : `afwImage.ImageI`\n Map of subregion with pixels filled with index of\n associated Footprints\n \"\"\"\n iSubRegion = np.array([ix, iy])\n corner = iSubRegion * self._subRegionSize\n idOffset = self.getIdOffset(ix, iy)\n srd = SubregionData(self, idOffset, corner, self._subRegionSize, self._subRegionBuffer)\n srd.reduceData(self._redData.values())\n oDict = srd.analyze(pixelR2Cut=self._pixelR2Cut)\n if oDict is None:\n return None\n if fullData:\n oDict['srd'] = srd\n return oDict\n if srd.nObjects >= self._subregionMaxObject:\n print(\"Too many object in a subregion\", srd.nObjects, elf._subregionMaxObject)\n return dict(srd=srd)\n\n def finish(self):\n \"\"\" Does clusering for all subregions\n\n Does not store source counts maps for the counts regions\n \"\"\"\n self._clusters = OrderedDict()\n nAssoc = 0\n clusterAssocTables = []\n objectAssocTables = []\n clusterStatsTables = []\n objectStatsTables = []\n\n for ix in range(int(self._nSubRegion[0])):\n sys.stdout.write(\"%2i \" % ix)\n sys.stdout.flush()\n for iy in range(int(self._nSubRegion[1])):\n sys.stdout.write('.')\n sys.stdout.flush()\n iSubRegion = (ix, iy)\n odict = self.analyzeSubregion(ix, iy)\n if odict is None:\n continue\n subregionData = odict['srd']\n self._clusters[iSubRegion] = subregionData\n clusterAssocTables.append(subregionData.getClusterAssociations())\n objectAssocTables.append(subregionData.getObjectAssociations())\n clusterStatsTables.append(subregionData.getClusterStats())\n objectStatsTables.append(subregionData.getObjectStats())\n \n sys.stdout.write('!\\n')\n\n sys.stdout.write(\"Making association vectors\\n\")\n hduList = fits.HDUList([fits.PrimaryHDU(),\n fits.table_to_hdu(vstack(clusterAssocTables)),\n fits.table_to_hdu(vstack(objectAssocTables)),\n fits.table_to_hdu(vstack(clusterStatsTables)),\n fits.table_to_hdu(vstack(objectStatsTables))])\n return hduList\n\n def allStats(self):\n \"\"\" Helper function to print info about clusters \"\"\"\n stats = np.zeros((4), int)\n for key, srd in self._clusters.items():\n subRegionStats = clusterStats(srd._clusterDict)\n print(\"%3i, %3i: %8i %8i %8i %8i\" % (key[0], key[1], subRegionStats[0], subRegionStats[1], subRegionStats[2], subRegionStats[3]))\n stats += subRegionStats\n return stats\n\ndef main():\n \"\"\" Example usage \"\"\"\n\n DATADIR = \".\"\n SOURCE_TABLEFILES = glob.glob(os.path.join(DATADIR, \"sourceTable-*.parq\"))\n VISIT_IDS = np.arange(len(SOURCE_TABLEFILES))\n\n REF_DIR = (150., 2.) # RA, DEC in deg\n REGION_SIZE = (3., 3.) # in Deg\n #CELL_SIZE = 5.0e-5 # in Deg\n CELL_SIZE = 1. 
/ (3600*2) # in Deg\n #SUBREGION_SIZE = 2700 # in Pixels\n SUBREGION_SIZE = 1350 # in Pixels\n PIXEL_R2CUT = 1.\n \n t0 = time.time()\n nWay = NWayMatch.create(REF_DIR, REGION_SIZE, CELL_SIZE, pixelR2Cut=PIXEL_R2CUT, subRegionSize=SUBREGION_SIZE)\n print(\"Building clusters in %ix%i sub-regions\" % (nWay.nSubRegion[0], nWay.nSubRegion[1]))\n nWay.reduceData(SOURCE_TABLEFILES, VISIT_IDS)\n outTables = nWay.finish()\n t1 = time.time()\n print(\"Reading and clustering took %s s\" % (t1-t0))\n\n print(\"Cluster Summaries for sub-regions\")\n print(\"Region : nCluster nOrphan nMixed nConf\")\n stats = nWay.allStats()\n print(\"Total: %8i %8i %8i %8i\" % (stats[0], stats[1], stats[2], stats[3]))\n\n outTables.writeto(\"out.fits\", overwrite=True)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.unique", "numpy.ones", "numpy.full", "numpy.ceil", "numpy.argmax", "numpy.mean", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.histogram2d" ] ]
ali-senguel/fairo-explore
[ "893481da270eed1e6d504c71e483d685ca9218d1" ]
[ "polymetis/polymetis/python/polymetis/robot_client/metadata.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import List\nfrom dataclasses import dataclass\nimport io\n\nimport torch\n\nimport polymetis\nimport polymetis_pb2\nfrom polymetis.utils.data_dir import get_full_path_to_urdf\nfrom torchcontrol.policies.default_controller import DefaultController\n\n\n@dataclass\nclass RobotModelConfig:\n \"\"\"Dataclass that holds relevant information for the robot model.\"\"\"\n\n robot_description_path: str\n controlled_joints: List[float]\n num_dofs: int\n ee_link_idx: int\n ee_link_name: str\n rest_pose: List[float]\n joint_limits_low: List[float]\n joint_limits_high: List[float]\n joint_damping: List[float]\n torque_limits: List[float]\n\n\n@dataclass\nclass RobotClientMetadataConfig:\n \"\"\"Dataclass holding full RobotClientMetadata, required for instantiatinga RobotClient with the server.\"\"\"\n\n default_Kq: List[float]\n default_Kqd: List[float]\n hz: int\n robot_model: RobotModelConfig\n\n\nclass RobotClientMetadata:\n \"\"\"Container class to hold all necessary metadata for the RobotClient.\n\n Constructs a container for the metadata by creating a default controller,\n loading the URDF file associated with the robot model and reading it into\n the metadata, and constructing the final Protobuf message containing\n the information necessary to instantiate a client that connects to the\n server.\n\n Args:\n default_Kq: Default position gains for the robot.\n\n default_Kdq: Default velocity gains for the robot.\n\n hz: Frequency the robot is running at.\n\n robot_model_cfg: A dataclass containing all the info necessary\n for a urdf model of the robot.\n\n \"\"\"\n\n def __init__(\n self,\n default_Kq: List[float],\n default_Kqd: List[float],\n hz: int,\n robot_model_cfg: RobotModelConfig,\n ):\n # Generate default controller and convert to TorchScript binary\n default_controller = DefaultController(Kq=default_Kq, Kqd=default_Kqd)\n buffer = io.BytesIO()\n torch.jit.save(torch.jit.script(default_controller), buffer)\n buffer.seek(0)\n default_controller_jitted = buffer.read()\n\n # Create RobotClientMetadata\n robot_client_metadata = polymetis_pb2.RobotClientMetadata()\n robot_client_metadata.hz = hz\n robot_client_metadata.dof = robot_model_cfg.num_dofs\n robot_client_metadata.ee_link_name = robot_model_cfg.ee_link_name\n robot_client_metadata.ee_link_idx = robot_model_cfg.ee_link_idx\n\n # Set gains as shared metadata\n robot_client_metadata.default_Kq[:] = default_Kq\n robot_client_metadata.default_Kqd[:] = default_Kqd\n robot_client_metadata.rest_pose[:] = robot_model_cfg.rest_pose\n\n # Set default controller for controller manager server\n robot_client_metadata.default_controller = default_controller_jitted\n\n # Load URDF file\n full_urdf_path = get_full_path_to_urdf(robot_model_cfg.robot_description_path)\n with open(full_urdf_path, \"r\") as file:\n robot_client_metadata.urdf_file = file.read()\n\n # Set version\n robot_client_metadata.polymetis_version = polymetis.__version__\n\n self.metadata_proto = robot_client_metadata\n\n def __repr__(self):\n return f\"Contains protobuf message {type(self.metadata_proto)}:\\n{str(self.metadata_proto)}\"\n\n def serialize(self) -> bytes:\n \"\"\"Returns a byte-serialized version of the underlying protobuf message.\"\"\"\n return self.metadata_proto.SerializeToString()\n\n def get_proto(self):\n \"\"\"Returns the underlying protobuf message.\"\"\"\n return 
self.metadata_proto\n" ]
[ [ "torch.jit.script" ] ]
BrisClimate/flood-cascade
[ "660c29275a87785153d0f107ed23104fcbcbddee" ]
[ "submit_scripts/fuse_gridded/generate_param_maps_3choices.py" ]
[ "# Script to produce maps of FUSE parameter sets, based on parameter transfer / regionalisation\n# Peter Uhe\n# June 2019\n#\n\nimport numpy as np\nimport pickle,glob,os,sys\nfrom netCDF4 import Dataset\n# Import libraries for plotting:\nimport matplotlib.pyplot as plt\nimport matplotlib.cm\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n\n#########################################################################################\n# Model configuration names\ndec = 902\nsetup_name = 'GBM-p1deg'\n# Name of observations (ideally would be the same, but they are renamed here)\ncalibversion0 = 'mswep-p1deg' # string for observations used in catchment calibration\ncalibversion1 = 'MSWEP2-2-ERA5-calibrated' # string for observations used in GBM gridded model\n\n# Path for gridded model:\ngriddir = '/work/pu17449/fuse/'+setup_name\n\n# Path for catchment models:\nbasedir = '/work/pu17449/fuse/p1deg_catchments'\n\n# Pickle file specifying donor catchments\n# (calculated by script FUSE_regionalization/parameter_transfer_p1deg-GBM_distances_v1-1.py)\nf_donors = '/home/pu17449/src/setup_scripts/fuse_GBM/GBM-p1deg_distances_GBM-reduced.pkl'\n\n# Elev bands file is used to extract grid and mask information.\nf_grid = os.path.join(griddir,'input/'+setup_name+'_elev_bands.nc')\n\n#########################################################################################\n\n# Load grid\nwith Dataset(f_grid,'r') as f:\n\tlons = f.variables['longitude'][:]\n\tlats = f.variables['latitude'][:]\n\t# Variable with mask for region simulated\n\tgridref = f.variables['mean_elev'][0,:]\n\n# Load donor catchments\nwith open(f_donors,'rb') as f:\n\tdonor_catchments = pickle.load(f)\n\n# Get array shape\nnz,ny,nx = donor_catchments.shape\n\n# Convert donor catchments to masked array\ndonor_catchments = np.ma.masked_where(donor_catchments==-1,donor_catchments)\n\nfor choice in range(3):\n\tprint('donor catchment choice:',choice)\n\tf_out = os.path.join(griddir,'output/'+setup_name+'_'+str(dec)+'_'+calibversion1+str(choice+1)+'.nc')\n\n\t# find best donor for each grid point (first in list after removing masked points)\n\tdonor_indices = {}\n\tbest_donor = np.ones([ny,nx],dtype=int)*-1\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\t# compress to remove masked values, then take first point\n\t\t\tcatchs = donor_catchments[:,j,i].compressed()\n\t\t\tif gridref[j,i] is not np.ma.masked:\n\t\t\t\tif len(catchs>choice):\n\t\t\t\t\tcatch = catchs[choice]\n\t\t\t\t\tbest_donor[j,i] = catch\n\t\t\t\t\tif catch in donor_indices:\n\t\t\t\t\t\tdonor_indices[catch].append((j,i))\n\t\t\t\t\telse:\n\t\t\t\t\t\tdonor_indices[catch] = [(j,i)]\n\t\t\t\telse:\n\t\t\t\t\traise Exception('Error, not enough donor catchments. 
Point: '+str(j)+','+str(i))\n\t##########################################################################\n\n\n\tparam_longname = {}\n\tparam_units = {}\n\t# Get list of parameters from an example parameter file\n\tcatchment = donor_indices.keys()[0]\n\tf_param = os.path.join(basedir,'fuse_grdc_'+str(catchment),'output','grdc_'+str(catchment)+'_'+str(dec)+'_'+calibversion0+'_para_sce.nc')\n\tprint('Reading template parameter file and geting metadata:',f_param)\n\twith Dataset(f_param,'r') as f:\n\t\tparam_list = list(f.variables.keys())\n\t\tfor param in param_list:\n\t\t\tparam_longname[param] = f.variables[param].long_name\n\t\t\tparam_units[param] = f.variables[param].units\n\n\n\tprint('params',sorted(param_list))\n\tparam_maps = {}\n\t# initialise parameter maps\n\tfor param in param_list:\n\t\tparam_maps[param] = np.ones([ny,nx],dtype=np.float32)*-9999\n\t\tparam_longname\n\n\tfor catchment,indices in donor_indices.items():\n\t\tf_param = os.path.join(basedir,'fuse_grdc_'+str(catchment),'output','grdc_'+str(catchment)+'_'+str(dec)+'_'+calibversion0+'_para_sce.nc')\n\t\tprint('catchment',catchment,f_param)\n\t\twith Dataset(f_param,'r') as f:\n\t\t\t# First get best trial from calibration (use raw_rmse)\n\t\t\trmse = f.variables['raw_rmse'][:].compressed()\n\t\t\tif len(rmse)>0:\n\t\t\t\tbesttry = rmse.argmin()\n\t\t\t\tnumtrials = len(rmse)\n\t\t\t\tprint('best parameter (lowest RMSE) is from trial',besttry,'of',numtrials)\n\t\t\t\tprint('NOTE: now using last trial when parameters have converged, even if not the lowest RMSE')\n\t\t\t\tfor param in param_list:\n\t\t\t\t\t#catch_param = f.variables[param][besttry]\n\t\t\t\t\tcatch_param = f.variables[param][numtrials-1]\n\t\t\t\t\tfor point in indices:\n\t\t\t\t\t\tparam_maps[param][point[0],point[1]] = catch_param\n\t\t\telse:\n\t\t\t\tprint('Error, no valid data for catchment',catchment)\n\n\t# Write grid to output\n\twith Dataset(f_out,'w') as f_out:\n\n\t\tf_out.createDimension('latitude',len(lats))\n\t\tf_out.createVariable('latitude',np.float,('latitude'))\n\t\tf_out.variables['latitude'].standard_name = \"latitude\"\n\t\tf_out.variables['latitude'].long_name = \"latitude\"\n\t\tf_out.variables['latitude'].units = \"degrees_north\"\n\t\tf_out.variables['latitude'].axis = \"Y\"\n\t\tf_out.variables['latitude'][:] = lats\n\n\t\tf_out.createDimension('longitude',len(lons))\n\t\tf_out.createVariable('longitude',np.float,('longitude'))\n\t\tf_out.variables['longitude'].standard_name = \"longitude\"\n\t\tf_out.variables['longitude'].long_name = \"longitude\"\n\t\tf_out.variables['longitude'].units = \"degrees_east\"\n\t\tf_out.variables['longitude'].axis = \"X\"\n\t\tf_out.variables['longitude'][:] = lons\n\n\t\tfor param,field in param_maps.items():\n\t\t\t# Simple selection of output paramsdd\n\t\t\t#if param[0].isupper()\n\t\t\tvar = f_out.createVariable(param,np.float,('latitude','longitude'),fill_value=-9999)\n\t\t\tvar.long_name = param_longname[param]\n\t\t\tvar.units = param_units[param]\n#\t\t\tvar[:] = np.ma.masked_where(field==-1,field)\n\t\t\tvar[:] = field\n\n\n\t############################################################################\n\t# Plot stuff\n\tif not os.path.exists('figs'):\n\t\tos.mkdir('figs')\n\tfig=plt.figure()\n\tax = fig.add_subplot(1,1,1,projection=ccrs.PlateCarree())\n\tax.set_extent([73.25, 97.75, 22.25, 31.25], crs=ccrs.PlateCarree())\n\t#ax.pcolormesh(lons,lats,distarray[0,::-1,:],vmin=0,vmax=1,cmap='jet')\n\tmarr = np.ma.masked_where(param_maps['raw_rmse']==-1,param_maps['raw_rmse'])\n\tcm = 
ax.pcolormesh(lons,lats,marr,cmap='jet',vmin=0,vmax=5,transform=ccrs.PlateCarree())\n\tax.coastlines()\n\tax.add_feature(cfeature.BORDERS)\n\tplt.colorbar(cm,ax=ax,shrink=0.3)\n\tplt.title('RMSE of donor catchment from calibration')\n\t#plt.show()\n\tplt.savefig('figs/calib_RMSE_donor.png')\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "numpy.ones", "matplotlib.pyplot.colorbar", "numpy.ma.masked_where", "matplotlib.pyplot.figure" ] ]
bjrnfrdnnd/panel-test
[ "4609a259e749825b2a2012d8a7e48ed8e8a78deb" ]
[ "nmrtools/plt.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom nmrtools.math import lorentz\n\n\"\"\"TODO: rethink plot routines. There are two main flavors:\n * plot peaklists, either as stick plots or with Lorentzians\n * plot lineshapes\n \nDifferent routes:\n peaklist -> stick plot\n peaklist -> Lorentzian lineshape\n DNMR simulation -> lineshape\n \nConsider function factories that will crank out the desired plot object.\n\nPeaklists may be np.arrays, or lists of tuples, depending on origin. Either\nuse a consistent form throughout (e.g. np.array, converting users array-like\nobjects as needed) or refactor to allow multiple inputs.\n\"\"\"\n\n\ndef add_signals(linspace, peaklist, w):\n \"\"\"\n Given a numpy linspace, a spectrum as a list of (frequency, intensity)\n tuples, and a linewidth, returns an array of y coordinates for the\n total line shape.\n\n Arguments\n ---------\n linspace : array-like\n normally a numpy.linspace of x coordinates corresponding to frequency\n in Hz.\n peaklist : [(float, float)...]\n a list of (frequency, intensity) tuples.\n w : float\n peak width at half maximum intensity.\n\n Returns\n -------\n [float...]\n an array of y coordinates corresponding to intensity.\n \"\"\"\n # TODO: consider naming, and confusion with .math.add_peaks\n # TODO: function looks clunky. Refactor?\n result = lorentz(linspace, peaklist[0][0], peaklist[0][1], w)\n for v, i in peaklist[1:]:\n result += lorentz(linspace, v, i, w)\n return result\n\n\ndef mplplot(peaklist, w=1, y_min=-0.01, y_max=1, points=800, limits=None):\n \"\"\"\n A no-frills routine that plots spectral simulation data.\n\n Arguments\n ---------\n peaklist : [(float, float)...]\n a list of (frequency, intensity) tuples.\n w : float\n peak width at half height\n y_max : float or int\n maximum intensity for the plot.\n points : int\n number of data points\n limits : (float, float)\n frequency limits for the plot\n \"\"\"\n # import matplotlib.pyplot as plt\n\n peaklist.sort() # Could become costly with larger spectra\n if limits:\n try:\n l_limit, r_limit = limits\n l_limit = float(l_limit)\n r_limit = float(r_limit)\n except Exception as e:\n print(e)\n print('limits must be a tuple of two numbers')\n # return None\n raise\n if l_limit > r_limit:\n l_limit, r_limit = r_limit, l_limit\n else:\n l_limit = peaklist[0][0] - 50\n r_limit = peaklist[-1][0] + 50\n x = np.linspace(l_limit, r_limit, points)\n plt.ylim(y_min, y_max)\n plt.gca().invert_xaxis() # reverses the x axis\n y = add_signals(x, peaklist, w)\n # noinspection PyTypeChecker\n plt.plot(x, y)\n plt.show()\n return x, y\n # TODO: or return plt? Decide behavior\n\n\ndef mplplot_stick(peaklist, y_min=-0.01, y_max=1, limits=None):\n \"\"\"TODO: description below incorrect. x, y must be numpy.ndarray.\n Decide on a consistent interface (e.g. vs. 
mplplot).\n Also: setting limits by adding small peaks is hacky, and also doesn't work\n if peaklist isn't ordered.\n \"\"\"\n \"\"\"\n matplotlib plot a spectrum in \"stick\" (stem) style.\n\n Arguments\n ---------\n x : [float...]\n a list of frequencies\n y : [float...]\n a list of intensities corresponding with x\n max_y : float\n maximum intensity for the plot.\n \"\"\"\n # import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots()\n if limits:\n try:\n l_limit, r_limit = limits\n l_limit = float(l_limit)\n r_limit = float(r_limit)\n except Exception as e:\n print(e)\n print('limits must be a tuple of two numbers')\n raise\n if l_limit > r_limit:\n l_limit, r_limit = r_limit, l_limit\n else:\n l_limit = peaklist[0][0] - 50\n r_limit = peaklist[-1][0] + 50\n x, y = zip(*peaklist)\n x = np.append(x, [l_limit, r_limit])\n y = np.append(y, [0.001, 0.001])\n plt.xlim(r_limit, l_limit)\n plt.ylim(y_min, y_max)\n ax.stem(x, y, markerfmt=' ', basefmt='C0-')\n ax.invert_xaxis()\n plt.show()\n return x, y\n\n\ndef mplplot_lineshape(x, y, y_min=None, y_max=None, limits=None):\n # fig, ax = plt.subplots()\n\n if limits:\n try:\n l_limit, r_limit = limits\n l_limit = float(l_limit)\n r_limit = float(r_limit)\n except Exception as e:\n print(e)\n print('limits must be a tuple of two numbers')\n raise\n if l_limit > r_limit:\n l_limit, r_limit = r_limit, l_limit\n else:\n l_limit = x[0] # assumes x already sorted low->high\n r_limit = x[-1]\n\n if y_min is None or y_max is None: # must test vs None so that 0 = True\n margin = max(y) * 0.1\n if y_min is None:\n y_min = min(y) - margin\n if y_max is None:\n y_max = max(y) + margin\n plt.xlim(r_limit, l_limit) # should invert x axis\n plt.ylim(y_min, y_max)\n plt.plot(x, y)\n plt.show()\n return x, y\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.plot", "numpy.append", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show" ] ]
samadejacobs/moses
[ "1fda9a06ef645f533191990cd10834e52ec29a37" ]
[ "scripts/run.py" ]
[ "import argparse\nimport os\nimport pandas as pd\nimport importlib.util\nimport sys\n\nfrom moses.models_storage import ModelsStorage\n\ndef load_module(name, path):\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module\n\n\nMODELS = ModelsStorage()\n\n\nsplit_dataset = load_module(\"split_dataset\", \"split_dataset.py\")\neval_script = load_module(\"eval\", \"metrics/eval.py\")\ntrainer_script = load_module(\"train\", \"train.py\")\nsampler_script = load_module(\"sample\", \"sample.py\")\n\n\ndef get_model_path(config, model):\n return os.path.join(\n config.checkpoint_dir, model + config.experiment_suff + \"_model.pt\"\n )\n\n\ndef get_config_path(config, model):\n return os.path.join(\n config.checkpoint_dir, model + config.experiment_suff + \"_config.pt\"\n )\n\n\ndef get_vocab_path(config, model):\n return os.path.join(\n config.checkpoint_dir, model + config.experiment_suff + \"_vocab.pt\"\n )\n\n\ndef get_generation_path(config, model):\n if(config.lbann_weights_dir):\n return os.path.join(config.lbann_weights_dir + 'e' + str(config.lbann_epoch_counts) + model + config.experiment_suff + '_generated.csv')\n else :\n return os.path.join(config.data_dir, model + config.experiment_suff + '_generated.csv')\n\ndef get_reconstruction_path(config, model):\n if(config.lbann_weights_dir):\n return os.path.join(config.lbann_weights_dir + 'e' + str(config.lbann_epoch_counts) + model + config.experiment_suff + '_predicted.csv')\n else :\n return os.path.join(config.data_dir, model + config.experiment_suff + '_predicted.csv')\n\n\ndef get_device(config):\n return f\"cuda:{config.gpu}\" if config.gpu >= 0 else \"cpu\"\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"all\",\n choices=[\"all\"] + MODELS.get_model_names(),\n help=\"Which model to run\",\n )\n parser.add_argument(\n \"--data_dir\", type=str, default=\"../data\", help=\"Directory for datasets\"\n )\n parser.add_argument(\n \"--checkpoint_dir\",\n type=str,\n default=\"./checkpoints\",\n help=\"Directory for checkpoints\",\n )\n parser.add_argument(\n \"--n_samples\", type=int, default=1000, help=\"Number of samples to sample\"\n )\n parser.add_argument(\"--n_jobs\", type=int, default=1, help=\"Number of threads\")\n parser.add_argument(\"--gpu\", type=int, default=-1, help=\"GPU index (-1 for cpu)\")\n parser.add_argument(\n \"--metrics\",\n type=str,\n default=\"metrics.csv\",\n help=\"Path to output file with metrics\",\n )\n parser.add_argument(\n \"--train_size\", type=int, default=None, help=\"Size of training dataset\"\n )\n parser.add_argument(\n \"--test_size\", type=int, default=None, help=\"Size of testing dataset\"\n )\n parser.add_argument(\n \"--experiment_suff\",\n type=str,\n default=\"\",\n help=\"Experiment suffix to break ambiguity\",\n )\n parser.add_argument(\n \"--vocab-path\", type=str, default=\"\", help=\"path to experiment vocabulary\"\n )\n parser.add_argument(\n \"--model-path\", type=str, default=\"\", help=\"path to experiment model\"\n )\n parser.add_argument(\n \"--config-path\", type=str, default=\"\", help=\"path to experiment config\"\n )\n parser.add_argument(\n \"--lbann_weights_dir\",\n type=str,\n default='',\n help=\"Directory for LBANN weights for inference\",\n )\n parser.add_argument(\n \"--lbann_epoch_counts\",\n type=int,\n default=-1,\n required=False,\n help=\"LBANN epoch count at which to load trained model,not 
required if only one trained model is available\",\n )\n parser.add_argument(\"--save_reconstruction\", action='store_true')\n return parser\n\n\n# UNDER CONSTRUCTION FOR COMPATIBILITY WITH LOADING AND TRAINING LBANN MODELS\ndef train_model(config, model, train_path):\n model_path = get_model_path(config, model)\n config_path = get_config_path(config, model)\n vocab_path = get_vocab_path(config, model)\n\n #vocab_path = config.vocab_path \n\n if os.path.exists(model_path) and \\\n os.path.exists(config_path) and \\\n os.path.exists(vocab_path):\n return\n\n trainer_parser = trainer_script.get_parser()\n trainer_config = trainer_parser.parse_known_args([model,] + sys.argv[1:] + [\n '--device', get_device(config),\n '--train_load', train_path,\n '--model_save', model_path,\n '--config_save', config_path,\n '--vocab_save', vocab_path,\n '--n_jobs', str(config.n_jobs)])[0]\n trainer_script.main(model, trainer_config)\n\n\ndef sample_from_model(config, model,test_path):\n model_path = config.model_path if config.model_path != '' else get_model_path(config, model)\n config_path = config.config_path if config.config_path != '' else get_config_path(config, model)\n vocab_path = config.vocab_path if config.vocab_path != '' else get_vocab_path(config, model)\n\n assert os.path.exists(\n model_path\n ), \"Can't find model path for sampling: '{}'\".format(model_path)\n assert os.path.exists(\n config_path\n ), \"Can't find config path for sampling: '{}'\".format(config_path)\n assert os.path.exists(\n vocab_path\n ), \"Can't find vocab path for sampling: '{}'\".format(vocab_path)\n\n if config.lbann_weights_dir:\n assert os.path.exists(config.lbann_weights_dir), (\n \"LBANN inference mode is specified but directory \"\n \" to load weights does not exist: '{}'\".format(config.lbann_weights_dir)\n )\n\n sampler_parser = sampler_script.get_parser()\n sampler_config = sampler_parser.parse_known_args(\n [model,]\n + sys.argv[1:]\n + [\n \"--device\",\n get_device(config),\n \"--model_load\",\n model_path,\n \"--config_load\",\n config_path,\n \"--vocab_load\",\n vocab_path,\n \"--lbann_weights_dir\",\n str(config.lbann_weights_dir),\n \"--lbann_epoch_counts\",\n str(config.lbann_epoch_counts),\n \"--gen_save\",\n get_generation_path(config, model),\n \"--pred_save\",\n get_reconstruction_path(config, model),\n \"--n_samples\",\n str(config.n_samples),\n '--test_path', \n test_path,\n ]\n )[0]\n sampler_script.main(model, sampler_config)\n\n\ndef eval_metrics(\n config, model, test_path, test_scaffolds_path, ptest_path, ptest_scaffolds_path, smiles_path,\n):\n eval_parser = eval_script.get_parser()\n eval_config = eval_parser.parse_args(\n [\n \"--test_path\",\n test_path,\n \"--test_scaffolds_path\",\n test_scaffolds_path,\n \"--ptest_path\",\n ptest_path,\n \"--ptest_scaffolds_path\",\n ptest_scaffolds_path,\n \"--gen_path\",\n #get_generation_path(config, model),\n smiles_path,\n \"--n_jobs\",\n str(config.n_jobs),\n \"--gpu\",\n str(config.gpu),\n ]\n )\n metrics = eval_script.main(eval_config, print_metrics=False)\n\n return metrics\n\n\ndef main(config):\n if not os.path.exists(config.data_dir):\n os.mkdir(config.data_dir)\n\n if not os.path.exists(config.checkpoint_dir):\n os.mkdir(config.checkpoint_dir)\n\n train_path = os.path.join(config.data_dir, \"train10k.csv\")\n test_path = os.path.join(config.data_dir, \"test.csv\")\n test_scaffolds_path = os.path.join(config.data_dir, \"test_scaffolds.csv\")\n ptest_path = os.path.join(config.data_dir, \"test_stats.npz\")\n ptest_scaffolds_path = 
os.path.join(config.data_dir, \"test_scaffolds_stats.npz\")\n\n if (\n not os.path.exists(train_path)\n or not os.path.exists(test_path)\n or not os.path.exists(test_scaffolds_path)\n ):\n splitting_config = split_dataset.get_parser()\n conf = [\"--dir\", config.data_dir]\n if config.train_size is not None:\n conf.extend([\"--train_size\", str(config.train_size)])\n if config.test_size is not None:\n conf.extend([\"--test_size\", str(config.test_size)])\n splitting_config = splitting_config.parse_args(conf)\n split_dataset.main(splitting_config)\n\n models = MODELS.get_model_names() if config.model == \"all\" else [config.model]\n for model in models:\n if not os.path.exists(config.lbann_weights_dir): # LBANN is inference only\n train_model(config, model, train_path)\n sample_from_model(config, model,test_path)\n metrics = []\n smiles_paths_tag = ['sampling']\n smiles_paths = [get_generation_path(config,model)]\n if(config.save_reconstruction) :\n smiles_paths.append(get_reconstruction_path(config,model))\n smiles_paths_tag.append('exact recon')\n for model in models:\n for i in range(len(smiles_paths)):\n model_metrics = eval_metrics(\n config,\n model,\n test_path,\n test_scaffolds_path,\n ptest_path,\n ptest_scaffolds_path,\n smiles_paths[i],\n )\n model_metrics.update({\"train size\": config.train_size})\n model_metrics.update({\"test size\": config.test_size})\n model_metrics.update({\"model\": model})\n model_metrics.update({\"mode\": smiles_paths_tag[i]})\n metrics.append(model_metrics)\n \n table = pd.DataFrame(metrics)\n if(config.lbann_weights_dir):\n config.metrics = os.path.join(config.lbann_weights_dir + 'e' + str(config.lbann_epoch_counts) + model + config.experiment_suff + '_metric.csv')\n print(\"Saving computed metrics to \", config.metrics)\n table.to_csv(config.metrics, index=False)\nif __name__ == \"__main__\":\n parser = get_parser()\n config = parser.parse_known_args()[0]\n main(config)\n" ]
[ [ "pandas.DataFrame" ] ]
younggeun-kim/Styleformer
[ "cb2dd2d169727ad968b3a482d3790008b7865af1" ]
[ "training/networks_Generator.py" ]
[ "import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_utils import misc\nfrom torch_utils import persistence\nfrom torch_utils.ops import upfirdn2d\nfrom torch_utils.ops import bias_act\nfrom torch_utils.ops import fma\nfrom torch.utils.checkpoint import checkpoint\n\n \n#----------------------------------------------------------------------------\n\[email protected]_function\ndef normalize_2nd_moment(x, dim=1, eps=1e-8):\n return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()\n\n#----------------------------------------------------------------------------\n\[email protected]_function\ndef modulated_mlp(x, fc1_weight, fc2_weight, u_weight, activation, mlp_ratio, mlp_drop, styles):\n batch_size = x.shape[0]\n seq_length = x.shape[1]\n hidden_dimension = x.shape[2]\n act_func = get_act(activation)\n layernorm = nn.InstanceNorm1d(seq_length, affine=False)\n skip = x\n \n \n styles1 = styles[:, :hidden_dimension]\n styles2 = styles[:, hidden_dimension:]\n \n x = x * styles1.to(x.dtype).reshape(batch_size, 1, -1)\n x = layernorm(x)\n \n \n fc1 = None\n fc2 = None\n fc1_dcoefs = None\n fc2_dcoefs = None\n \n fc1 = fc1_weight.unsqueeze(0)\n fc2 = fc2_weight.unsqueeze(0)\n fc1 = fc1 * styles1.reshape(batch_size, 1, -1)\n fc2 = fc2 * styles2.reshape(batch_size, 1, -1)\n \n \n fc1_dcoefs = (fc1.square().sum(dim=[2]) + 1e-8).rsqrt()\n fc2_dcoefs = (fc2.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n x = torch.matmul(x, fc1_weight.t().to(x.dtype))\n x = x * fc1_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n \n x = x * styles2.to(x.dtype).reshape(batch_size, 1, -1)\n x = act_func(x)\n #x = F.dropout(x, p=mlp_drop)\n x = torch.matmul(x, fc2_weight.t().to(x.dtype))\n x = x * fc2_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n if x.shape[2] != skip.shape[2]:\n print(\"bad\")\n u = None\n u_dcoefs = None\n\n u = u_weight\n u_dcoefs = (u.square().sum(dim=[1]) + 1e-8).rsqrt()\n\n skip = torch.matmul(skip, u_weight.t().to(x.dtype))\n skip = skip * u_dcoefs.to(x.dtype).reshape(1, 1, -1)\n #x = F.dropout(x, p=mlp_drop)\n \n return x\n\n#----------------------------------------------------------------------------\n\[email protected]_function\ndef modulated_style_mlp(x, weight, styles):\n batch_size = x.shape[0]\n channel = x.shape[1]\n width = x.shape[2]\n height = x.shape[3]\n\n w = None\n dcoefs = None\n \n w = weight.unsqueeze(0)\n w = w * styles.reshape(batch_size, 1, -1)\n dcoefs = (w.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n x = x.reshape(batch_size, channel, width*height).permute(0, 2, 1)\n x = x * styles.to(x.dtype).reshape(batch_size, 1, -1)\n x = torch.matmul(x, weight.t().to(x.dtype))\n x = x * dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n x = x.permute(0, 2, 1).reshape(batch_size, -1, width, height)\n \n return x\n \n#----------------------------------------------------------------------------\n\[email protected]_function\ndef modulated_channel_attention(x, q_weight, k_weight, v_weight, w_weight, u_weight, proj_weight, styles, num_heads):\n \n batch_size = x.shape[0]\n seq_length = x.shape[1]\n hidden_dimension = x.shape[2]\n \n assert hidden_dimension % num_heads == 0\n \n depth = hidden_dimension // num_heads\n \n attention_scale = torch.tensor(depth ** -0.5).to(x.dtype)\n\n layernorm = nn.InstanceNorm1d(seq_length, affine=False) \n \n styles1 = styles[:, :hidden_dimension]\n styles2 = styles[:, hidden_dimension:]\n\n\n x = x * styles1.to(x.dtype).reshape(batch_size, 1, -1)\n x = layernorm(x)\n \n q = q_weight.unsqueeze(0)\n 
q = q * styles1.reshape(batch_size, 1, -1)\n q_dcoefs = (q.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n k = k_weight.unsqueeze(0)\n k = k * styles1.reshape(batch_size, 1, -1)\n k_dcoefs = (k.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n v = v_weight.unsqueeze(0)\n v = v * styles1.reshape(batch_size, 1, -1)\n v_dcoefs = (v.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n w = w_weight.unsqueeze(0)\n w = w * styles2.reshape(batch_size, 1, -1)\n w_dcoefs = (w.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n \n q_value = torch.matmul(x, q_weight.t().to(x.dtype)) * q_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n q_value = q_value.reshape(batch_size, seq_length, num_heads, depth).permute(0,2,1,3)\n k_value = torch.matmul(x, k_weight.t().to(x.dtype)) * k_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n k_value = k_value.reshape(batch_size, seq_length, num_heads, depth).permute(0,2,1,3)\n if proj_weight is not None:\n k_value = torch.matmul(k_value.permute(0,1,3,2), proj_weight.t().to(x.dtype)).permute(0,1,3,2)\n v_value = torch.matmul(x, v_weight.t().to(x.dtype))\n\n v_value = v_value * v_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n \n v_value = v_value * styles2.to(x.dtype).reshape(batch_size, 1, -1)\n skip = v_value\n if proj_weight is not None:\n v_value = torch.matmul(v_value.permute(0,2,1), proj_weight.t().to(x.dtype)).permute(0,2,1)\n v_value = v_value.reshape(batch_size, 256, num_heads, depth).permute(0,2,1,3)\n \n else:\n v_value = v_value.reshape(batch_size, seq_length, num_heads, depth).permute(0,2,1,3)\n \n attn = torch.matmul(q_value, k_value.permute(0,1,3,2)) * attention_scale \n revised_attn = attn \n\n attn_score = revised_attn.softmax(dim=-1)\n\n x = torch.matmul(attn_score , v_value).permute(0, 2, 1, 3).reshape(batch_size, seq_length, hidden_dimension) \n\n x = torch.matmul(x, w_weight.t().to(x.dtype))\n\n x = x * w_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n \n u = u_weight.unsqueeze(0)\n u = u * styles2.reshape(batch_size, 1, -1)\n u_dcoefs = (u.square().sum(dim=[2]) + 1e-8).rsqrt()\n \n #skip = torch.matmul(skip, u_weight.t().to(x.dtype))\n #skip = skip * u_dcoefs.to(x.dtype).reshape(batch_size, 1, -1)\n \n x = x #+ skip\n\n return x \n\n#----------------------------------------------------------------------------\n\[email protected]_class\nclass FullyConnectedLayer(nn.Module):\n def __init__(self,\n in_features, # Number of input features.\n out_features, # Number of output features.\n bias = True, # Apply additive bias before the activation function?\n activation = 'linear', # Activation function: 'relu', 'lrelu', etc.\n lr_multiplier = 1, # Learning rate multiplier.\n bias_init = 0, # Initial value for the additive bias.\n ):\n super().__init__()\n self.activation = activation\n self.in_features = in_features\n self.out_features = out_features\n self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)\n self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None\n self.weight_gain = lr_multiplier / np.sqrt(in_features)\n self.bias_gain = lr_multiplier\n\n def forward(self, x):\n w = self.weight.to(x.dtype) * self.weight_gain\n b = self.bias\n if b is not None:\n b = b.to(x.dtype)\n if self.bias_gain != 1:\n b = b * self.bias_gain\n\n if self.activation == 'linear' and b is not None:\n x = torch.addmm(b.unsqueeze(0), x, w.t())\n else:\n x = x.matmul(w.t())\n x = bias_act.bias_act(x, b, act=self.activation)\n return x\n \n#----------------------------------------------------------------------------\n'''\[email 
protected]_class\nclass MappingNetwork(nn.Module):\n def __init__(self,\n z_dim, # Input latent (Z) dimensionality, 0 = no latent.\n w_dim, # Intermediate latent (W) dimensionality.\n num_ws, # Number of intermediate latents to output, None = do not broadcast.\n num_layers = 8, # Number of mapping layers.\n embed_features = None, # Label embedding dimensionality, None = same as w_dim.\n layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim.\n activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.\n lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers.\n w_avg_beta = 0.995, # Decay for tracking the moving average of W during training, None = do not track.\n ):\n super().__init__()\n self.z_dim = z_dim\n self.w_dim = w_dim\n self.num_ws = num_ws\n self.num_layers = num_layers\n self.w_avg_beta = w_avg_beta\n\n if embed_features is None:\n embed_features =0 # w_dim\n if layer_features is None:\n layer_features = w_dim\n features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]\n for idx in range(num_layers):\n in_features = features_list[idx]\n out_features = features_list[idx + 1]\n layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)\n setattr(self, f'fc{idx}', layer)\n\n if num_ws is not None and w_avg_beta is not None:\n self.register_buffer('w_avg', torch.zeros([w_dim]))\n\n def forward(self, z, c=None, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):\n # Embed, normalize, and concat inputs.\n x = None\n with torch.autograd.profiler.record_function('input'):\n if self.z_dim > 0:\n misc.assert_shape(z, [None, self.z_dim])\n x = normalize_2nd_moment(z.to(torch.float32)) \n\n # Main layers\n for idx in range(self.num_layers):\n layer = getattr(self, f'fc{idx}')\n x = layer(x)\n\n # Update moving average of W.\n if self.w_avg_beta is not None and self.training and not skip_w_avg_update:\n with torch.autograd.profiler.record_function('update_w_avg'):\n self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))\n\n # Broadcast.\n if self.num_ws is not None:\n with torch.autograd.profiler.record_function('broadcast'):\n x = x.unsqueeze(1).repeat([1, self.num_ws, 1])\n\n # Apply truncation.\n \n if truncation_psi != 1:\n with torch.autograd.profiler.record_function('truncate'):\n assert self.w_avg_beta is not None\n if self.num_ws is None or truncation_cutoff is None:\n x = self.w_avg.lerp(x, truncation_psi)\n else:\n x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)\n return x\n'''\n\[email protected]_class\nclass MappingNetwork(torch.nn.Module):\n def __init__(self,\n z_dim, # Input latent (Z) dimensionality, 0 = no latent.\n c_dim, # Conditioning label (C) dimensionality, 0 = no label.\n w_dim, # Intermediate latent (W) dimensionality.\n num_ws, # Number of intermediate latents to output, None = do not broadcast.\n num_layers = 8, # Number of mapping layers.\n embed_features = None, # Label embedding dimensionality, None = same as w_dim.\n layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim.\n activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.\n lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers.\n w_avg_beta = 0.995, # Decay for tracking the moving average of W during training, None = do not track.\n ):\n super().__init__()\n self.z_dim = z_dim\n self.c_dim = c_dim\n self.w_dim 
= w_dim\n self.num_ws = num_ws\n self.num_layers = num_layers\n self.w_avg_beta = w_avg_beta\n\n if embed_features is None:\n embed_features = w_dim\n if c_dim == 0:\n embed_features = 0\n if layer_features is None:\n layer_features = w_dim\n features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]\n\n if c_dim > 0:\n self.embed = FullyConnectedLayer(c_dim, embed_features)\n for idx in range(num_layers):\n in_features = features_list[idx]\n out_features = features_list[idx + 1]\n layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)\n setattr(self, f'fc{idx}', layer)\n\n if num_ws is not None and w_avg_beta is not None:\n self.register_buffer('w_avg', torch.zeros([w_dim]))\n\n def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):\n # Embed, normalize, and concat inputs.\n x = None\n with torch.autograd.profiler.record_function('input'):\n if self.z_dim > 0:\n misc.assert_shape(z, [None, self.z_dim])\n x = normalize_2nd_moment(z.to(torch.float32))\n if self.c_dim > 0:\n misc.assert_shape(c, [None, self.c_dim])\n y = normalize_2nd_moment(self.embed(c.to(torch.float32)))\n x = torch.cat([x, y], dim=1) if x is not None else y\n\n # Main layers.\n for idx in range(self.num_layers):\n layer = getattr(self, f'fc{idx}')\n x = layer(x)\n\n # Update moving average of W.\n if self.w_avg_beta is not None and self.training and not skip_w_avg_update:\n with torch.autograd.profiler.record_function('update_w_avg'):\n self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))\n\n # Broadcast.\n if self.num_ws is not None:\n with torch.autograd.profiler.record_function('broadcast'):\n x = x.unsqueeze(1).repeat([1, self.num_ws, 1])\n\n # Apply truncation.\n if truncation_psi != 1:\n with torch.autograd.profiler.record_function('truncate'):\n assert self.w_avg_beta is not None\n if self.num_ws is None or truncation_cutoff is None:\n x = self.w_avg.lerp(x, truncation_psi)\n else:\n x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)\n return x\n\n#----------------------------------------------------------------------------\n\[email protected]_class\nclass Encoderlayer(nn.Module):\n def __init__(self, h_dim, w_dim, out_dim, seq_length, depth, minimum_head, use_noise=True, conv_clamp=None, proj_weight=None, channels_last=False):\n super().__init__()\n self.h_dim = h_dim\n self.num_heads = max(minimum_head, h_dim // depth)\n self.w_dim = w_dim\n self.out_dim = out_dim\n self.seq_length = seq_length\n self.use_noise = use_noise\n self.conv_clamp = conv_clamp\n self.affine1 = FullyConnectedLayer(w_dim, h_dim*2, bias_init=1)\n \n memory_format = torch.channels_last if channels_last else torch.contiguous_format\n \n self.q_weight = torch.nn.Parameter(torch.FloatTensor(h_dim, h_dim).uniform_(-1./math.sqrt(h_dim), 1./math.sqrt(h_dim)).to(memory_format=memory_format))\n self.k_weight = torch.nn.Parameter(torch.FloatTensor(h_dim, h_dim).uniform_(-1./math.sqrt(h_dim), 1./math.sqrt(h_dim)).to(memory_format=memory_format)) \n self.v_weight = torch.nn.Parameter(torch.FloatTensor(h_dim, h_dim).uniform_(-1./math.sqrt(h_dim), 1./math.sqrt(h_dim)).to(memory_format=memory_format))\n self.w_weight = torch.nn.Parameter(torch.FloatTensor(out_dim, h_dim).uniform_(-1./math.sqrt(h_dim), 1./math.sqrt(h_dim)).to(memory_format=memory_format))\n \n self.proj_weight = proj_weight\n \n self.u_weight = torch.nn.Parameter(torch.FloatTensor(out_dim, 
h_dim).uniform_(-1./math.sqrt(h_dim), 1./math.sqrt(h_dim)).to(memory_format=memory_format))\n if use_noise:\n self.register_buffer('noise_const', torch.randn([self.seq_length, 1]))\n self.noise_strength = torch.nn.Parameter(torch.zeros([]))\n self.bias = torch.nn.Parameter(torch.zeros([out_dim]))\n \n\n \n def forward(self, x, w, noise_mode='random', gain=1):\n assert noise_mode in ['random', 'const', 'none']\n misc.assert_shape(x, [None, self.seq_length, self.h_dim])\n styles1 = self.affine1(w)\n \n noise = None\n if self.use_noise and noise_mode == 'random':\n noise = torch.randn([x.shape[0], self.seq_length, 1], device = x.device) * self.noise_strength\n if self.use_noise and noise_mode == 'const':\n noise = self.noise_const * self.noise_strength\n \n x = modulated_channel_attention(x=x, q_weight=self.q_weight, k_weight=self.k_weight, v_weight=self.v_weight, w_weight=self.w_weight, u_weight=self.u_weight, proj_weight=self.proj_weight, styles=styles1, num_heads=self.num_heads) \n \n if noise is not None:\n x = x.add_(noise)\n \n act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None\n x = x + self.bias.to(x.dtype)\n x = F.leaky_relu(x, negative_slope=0.2)\n x = torch.clamp(x, max=act_clamp, min=-act_clamp)\n return x\n \n \n#----------------------------------------------------------------------------\n\[email protected]_class\nclass ToRGBLayer(torch.nn.Module):\n def __init__(self, in_channels, out_channels, w_dim, conv_clamp=None, channels_last=False):\n super().__init__()\n self.conv_clamp = None\n self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)\n memory_format = torch.channels_last if channels_last else torch.contiguous_format\n self.weight = torch.nn.Parameter(torch.FloatTensor(out_channels, in_channels).uniform_(-1./math.sqrt(in_channels), 1./math.sqrt(in_channels)).to(memory_format=memory_format))\n self.bias = torch.nn.Parameter(torch.zeros([out_channels]))\n\n def forward(self, x, w, fused_modconv=True):\n styles = self.affine(w) \n x = modulated_style_mlp(x=x, weight=self.weight, styles=styles)\n x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)\n return x\n \n#---------------------------------------------------------------------------- \n\[email protected]_class\nclass EncoderBlock(nn.Module):\n def __init__(self, h_dim, w_dim, out_dim, depth, minimum_head, img_resolution, resolution, img_channels, is_first, is_last, architecture='skip', linformer=False, conv_clamp=None, use_fp16=False, fp16_channels_last=False, resample_filter =[1,3,3,1], scale_ratio=2, **layer_kwargs):\n super().__init__()\n self.h_dim = h_dim\n self.w_dim = w_dim\n self.out_dim = out_dim\n self.depth = depth\n self.minimum_head = minimum_head\n self.img_resolution = img_resolution\n self.resolution = resolution\n self.img_channels = img_channels\n self.seq_length = resolution * resolution\n self.is_first = is_first\n self.is_last = is_last\n self.architecture = architecture\n self.use_fp16 = use_fp16\n self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))\n self.channels_last = (use_fp16 and fp16_channels_last)\n self.num_attention = 0\n self.num_torgb = 0\n self.scale_ratio = scale_ratio\n self.conv_clamp = conv_clamp\n self.proj_weight = None\n \n memory_format = torch.contiguous_format\n \n if self.resolution>=32 and linformer:\n self.proj_weight = torch.nn.Parameter(torch.FloatTensor(256, self.seq_length ).uniform_(-1./math.sqrt(self.seq_length), 1./math.sqrt(self.seq_length)).to(memory_format=memory_format))\n \n \n 
\n if self.is_first and self.resolution == 8:\n self.const = torch.nn.Parameter(torch.randn([self.seq_length, self.h_dim]))\n \n if self.is_first:\n self.pos_embedding = torch.nn.Parameter(torch.zeros(1, self.seq_length, self.h_dim))\n \n if not self.is_last or out_dim is None:\n self.out_dim = h_dim\n \n self.enc = Encoderlayer(h_dim=self.h_dim, w_dim=self.w_dim, out_dim=self.out_dim, seq_length=self.seq_length, depth=self.depth, minimum_head=self.minimum_head, conv_clamp=self.conv_clamp, proj_weight=self.proj_weight)\n self.num_attention += 1\n \n if self.is_last and self.architecture == 'skip':\n self.torgb = ToRGBLayer(self.out_dim, self.img_channels, w_dim=w_dim, conv_clamp=conv_clamp, channels_last=self.channels_last)\n self.num_torgb += 1\n \n \n def forward(self, x, img, ws, force_fp32=True, fused_modconv=None):\n misc.assert_shape(ws, [None, self.num_attention + self.num_torgb, self.w_dim])\n w_iter = iter(ws.unbind(dim=1))\n dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32\n memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format\n if fused_modconv is None:\n with misc.suppress_tracer_warnings(): # this value will be treated as a constant\n fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1)\n \n #Input\n if self.is_first and self.resolution == 8:\n x = self.const.to(dtype=dtype, memory_format=memory_format)\n x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1])\n else:\n misc.assert_shape(x, [None, self.seq_length, self.h_dim])\n x = x.to(dtype=dtype, memory_format=memory_format)\n \n #Main layers\n if self.is_first:\n x = x + self.pos_embedding\n\n \n if self.architecture == 'resnet':\n y = self.skip(x.permute(0,2,1).reshape(ws.shape[0], self.h_dim, self.resolution, self.resolution))\n x = self.enc(x, next(w_iter))\n y = y.reshape(ws.shape[0], self.h_dim, self.seq_length)\n x = y.add_(x)\n else:\n x = self.enc(x, next(w_iter)).to(dtype=dtype, memory_format=memory_format)\n #ToRGB\n if self.is_last:\n if img is not None:\n misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution //2])\n img = upfirdn2d.upsample2d(img, self.resample_filter)\n \n if self.architecture == 'skip':\n y = self.torgb(x.permute(0,2,1).reshape(ws.shape[0], self.out_dim, self.resolution, self.resolution), next(w_iter), fused_modconv=fused_modconv)\n y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)\n img = img.add_(y) if img is not None else y\n #upsample\n if self.resolution!=self.img_resolution:\n x = upfirdn2d.upsample2d(x.permute(0,2,1).reshape(ws.shape[0], self.out_dim, self.resolution, self.resolution), self.resample_filter)\n x = x.reshape(ws.shape[0], self.out_dim, self.seq_length * self.scale_ratio **2).permute(0,2,1)\n \n \n \n assert x.dtype == dtype\n assert img is None or img.dtype == torch.float32\n return x, img\n\n#----------------------------------------------------------------------------\n\[email protected]_class\nclass SynthesisNetwork(nn.Module):\n def __init__(self, w_dim, img_resolution, img_channels, depth, minimum_head, num_layers, G_dict, conv_clamp, channel_base = 8192, channel_max = 256, num_fp16_res = 0, linformer=False):\n assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0\n super().__init__() \n self.w_dim = w_dim\n self.img_resolution = img_resolution\n self.img_resolution_log2 = int(np.log2(img_resolution))\n self.img_channels = img_channels\n self.num_block = num_layers\n self.block_resolutions 
= [2 ** i for i in range(3, self.img_resolution_log2 + 1)]\n assert len(self.block_resolutions) == len(self.num_block)\n channels_dict = dict(zip(*[self.block_resolutions, G_dict]))\n fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)\n \n self.num_ws = 0\n for i, res in enumerate(self.block_resolutions):\n h_dim = channels_dict[res]\n out_dim = None\n if res!=self.img_resolution:\n out_dim = channels_dict[res*2]\n use_fp16 = (res >= fp16_resolution)\n num_block_res = self.num_block[i]\n for j in range(num_block_res):\n is_first = (j == 0)\n is_last = (j == num_block_res - 1)\n block = EncoderBlock(\n h_dim=h_dim, w_dim=w_dim, out_dim=out_dim, depth=depth, minimum_head=minimum_head, img_resolution=img_resolution, resolution=res, img_channels=img_channels, \n is_first=is_first, is_last=is_last, use_fp16=use_fp16, conv_clamp=conv_clamp, linformer=linformer\n )\n self.num_ws += block.num_attention\n if is_last:\n self.num_ws += block.num_torgb\n setattr(self, f'b{res}_{j}', block)\n\n def forward(self, ws=None):\n block_ws = []\n with torch.autograd.profiler.record_function('split_ws'):\n misc.assert_shape(ws, [None, self.num_ws, self.w_dim])\n ws = ws.to(torch.float32)\n w_idx = 0\n for i, res in enumerate(self.block_resolutions):\n num_block_res = self.num_block[i]\n res_ws = []\n for j in range(num_block_res):\n block = getattr(self, f'b{res}_{j}')\n res_ws.append(ws.narrow(1, w_idx, block.num_attention + block.num_torgb))\n w_idx += block.num_attention\n block_ws.append(res_ws)\n \n x = img = None\n for i, (res, cur_ws) in enumerate(zip(self.block_resolutions, block_ws)):\n num_block_res = self.num_block[i]\n for j in range(num_block_res):\n block = getattr(self, f'b{res}_{j}')\n x, img = block(x, img, cur_ws[j])\n \n return img\n\n#----------------------------------------------------------------------------\n\[email protected]_class\nclass Generator(nn.Module):\n def __init__(self, z_dim, c_dim, w_dim, img_resolution, img_channels, mapping_kwargs = {}, synthesis_kwargs = {}):\n super().__init__()\n self.z_dim = z_dim\n self.c_dim = c_dim\n self.w_dim = w_dim\n self.img_resolution = img_resolution\n self.img_channels = img_channels\n self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)\n self.num_ws = self.synthesis.num_ws\n self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) \n \n def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, epoch=None, **synthesis_kwargs):\n ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)\n output = self.synthesis(ws) \n return output\n\n\n\n\n" ]
[ [ "torch.autograd.profiler.record_function", "numpy.log2", "numpy.sqrt", "torch.nn.InstanceNorm1d", "torch.zeros", "torch.randn", "torch.cat", "torch.tensor", "torch.matmul", "torch.FloatTensor", "torch.nn.functional.leaky_relu", "numpy.float32", "torch.clamp" ] ]
M3nin0/iris-analysis
[ "2562e611f1535a33bee76fdc281c58bfd74fbca3" ]
[ "Process.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Process(object):\n \n def __init__(self, data):\n self.data = data\n\n def plotSetosa(self):\n # Exibindo o comprimento da sépala da iris setosa\n plt.plot(self.data[:50, 0], 'r-', marker = 'o', ms = 6, label = 'Comp. Sépala Iris-Setosa')\n plt.show()\n\n def plotVscolor(self):\n # Exibindo o comprimento da sépala da iris Versicolour\n plt.plot(self.data[50: 100, 0], 'g--', marker = 'o', ms = 6, label = 'Comp. Sépala Iris-Versicolour')\n plt.show()\n \n def plotCompSetVs(self):\n plt.plot(self.data[:50, 0], c ='Black', ls = '-', marker = 's', ms = 6, label = 'Comp. Sépala Iris-Setosa')\n plt.plot(self.data[50: 100, 0], 'r-', marker = 'o', ms = 6, label = 'Comp. Sépala Iris-Versicolour')\n plt.legend()\n plt.show()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ] ]
baidu/information-extraction
[ "748c8915858ce8b82c3ce5a828cf0d79c54874d9" ]
[ "bin/p_classification/p_infer.py" ]
[ "# -*- coding: utf-8 -*-\n########################################################\n# Copyright (c) 2019, Baidu Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# imitations under the License.\n########################################################\n\"\"\"\nThis module to infer with a p classification model\n\"\"\"\n\nimport json\nimport os\nimport sys\nimport argparse\nimport ConfigParser\nimport math\n\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\n\nimport p_data_reader\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../lib\")))\nimport conf_lib\n\n\ndef predict_infer(conf_dict, data_reader, predict_data_path, \\\n predict_result_path, model_path):\n \"\"\"\n Predict with trained models \n \"\"\"\n if len(predict_result_path) > 0:\n result_writer = open(predict_result_path, 'w')\n else:\n result_writer = sys.stdout\n\n np.set_printoptions(precision=3)\n if len(model_path) == 0:\n return\n\n place = fluid.CPUPlace()\n word = fluid.layers.data(\n name='word_data', shape=[1], dtype='int64', lod_level=1)\n postag = fluid.layers.data(\n name='token_pos', shape=[1], dtype='int64', lod_level=1)\n feeder = fluid.DataFeeder(feed_list=[word, postag], place=place)\n exe = fluid.Executor(place)\n\n test_batch_reader = paddle.batch(\n paddle.reader.buffered(data_reader.get_predict_reader\\\n (predict_data_path, need_input=True, need_label=False),\n size=8192),\n batch_size=conf_dict[\"batch_size\"])\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n [inference_program, feed_target_names, fetch_targets] = \\\n fluid.io.load_inference_model(\n model_path, exe, params_filename='params')\n\n # batch\n batch_id = 0\n for data in test_batch_reader():\n feeder_data = []\n input_data = []\n for item in data:\n input_dic = json.loads(item[0])\n input_data.append(input_dic)\n feeder_data.append(item[1:])\n results = exe.run(inference_program, feed=feeder.feed(feeder_data),\n fetch_list=fetch_targets, return_numpy=False)\n label_scores = np.array(results[0]).tolist()\n #infer a batch\n infer_a_batch(label_scores, input_data, result_writer, data_reader)\n \n batch_id += 1\n\n\ndef infer_a_batch(label_scores, input_data, result_writer, data_reader):\n \"\"\"Infer the results of a batch\"\"\"\n for sent_idx, label in enumerate(label_scores):\n p_label = []\n label = map(float, label)\n for p_idx, p_score in enumerate(label):\n if sigmoid(p_score) > 0.5:\n p_label.append(data_reader.get_label_output(p_idx))\n for p in p_label:\n output_fields = [json.dumps(input_data[sent_idx], ensure_ascii=False), p]\n result_writer.write('\\t'.join(output_fields).encode('utf-8'))\n result_writer.write('\\n')\n\n\ndef sigmoid(x):\n \"\"\"sigmode function\"\"\"\n return math.exp(x) / (1 + math.exp(x))\n\n\ndef main(conf_dict, model_path, predict_data_path, \n predict_result_path, use_cuda=False):\n \"\"\"Predict main function\"\"\"\n if use_cuda and not fluid.core.is_compiled_with_cuda():\n return\n data_generator = p_data_reader.RcDataReader(\n 
wordemb_dict_path=conf_dict['word_idx_path'],\n postag_dict_path=conf_dict['postag_dict_path'],\n label_dict_path=conf_dict['label_dict_path'],\n train_data_list_path=conf_dict['train_data_path'],\n test_data_list_path=conf_dict['test_data_path'])\n \n predict_infer(conf_dict, data_generator, predict_data_path, \\\n predict_result_path, model_path)\n\n\nif __name__ == '__main__':\n # Load configuration file\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--conf_path\", type=str,\n help=\"conf_file_path_for_model. (default: %(default)s)\",\n required=True)\n parser.add_argument(\"--model_path\", type=str,\n help=\"model_path\", required=True)\n parser.add_argument(\"--predict_file\", type=str,\n help=\"the_file_to_be_predicted\", required=True)\n parser.add_argument(\"--result_file\", type=str,\n default='', help=\"the_file_of_predicted_results\")\n args = parser.parse_args()\n conf_dict = conf_lib.load_conf(args.conf_path)\n model_path = args.model_path\n predict_data_path = args.predict_file\n predict_result_path = args.result_file\n for input_path in [model_path, predict_data_path]:\n if not os.path.exists(input_path):\n raise ValueError(\"%s not found.\" % (input_path))\n main(conf_dict, model_path, predict_data_path, predict_result_path)\n" ]
[ [ "numpy.set_printoptions", "numpy.array" ] ]
Aitical/ADspeech2face
[ "2e811ff8cc7333729f4b77d1b1067296253e8e38" ]
[ "test_scripts/stylegan.py" ]
[ "import torch\nfrom torchvision.utils import save_image\nfrom models.stylegan2_pytorch import ModelLoader\n\n\nloader = ModelLoader(\n base_dir = '/home/aitical/Documents/paper_with_code/speech2face/stylegan2/', # path to where you invoked the command line tool\n name = 'default' # the project name, defaults to 'default'\n)\n\nnoise = torch.randn(3, 512).cuda() # noise\nstyles = loader.noise_to_styles(noise, trunc_psi = 0.7) # pass through mapping network\nprint(styles.shape)\nimages = loader.styles_to_images(styles) # call the generator on intermediate style vectors\n\nsave_image(images, './sample.jpg')\n\ng_net = loader.model.GAN.GE\nfor p in g_net.parameters():\n if p.requires_grad:\n print(p, p.requires_grad)\n\nstyles = torch.rand(3, 6, 512).cuda()\nnoise = torch.rand(3, 128, 128, 1).cuda()\nimg = loader.model.GAN.GE(styles, noise)\nprint(img.shape)" ]
[ [ "torch.randn", "torch.rand" ] ]
reppertj/earworm
[ "5c3d457e2c09ce96be75fcb19cd9acf819b84c4b" ]
[ "backend/app/app/tests/utils/utils.py" ]
[ "import random\nfrom numpy.random import default_rng\nimport numpy as np\nimport string\nfrom typing import Dict, List\n\nfrom fastapi.testclient import TestClient\n\nfrom app.core.config import settings\n\nrng = default_rng(42)\n\ndef random_lower_string() -> str:\n return \"\".join(random.choices(string.ascii_lowercase, k=32))\n\n\ndef random_email() -> str:\n return f\"{random_lower_string()}@{random_lower_string()}.com\"\n\ndef random_url() -> str:\n return f\"https://{random.choice(['www.', ''])}{random_lower_string()}.com\"\n\ndef random_unit_vector(dim=128) -> List[float]:\n def normalize(ary):\n norm = np.linalg.norm(ary)\n return ary if norm == 0 else ary / norm\n return list(normalize(rng.standard_normal(dim)))\n\ndef get_superuser_token_headers(client: TestClient) -> Dict[str, str]:\n login_data = {\n \"username\": settings.FIRST_SUPERUSER,\n \"password\": settings.FIRST_SUPERUSER_PASSWORD,\n }\n r = client.post(f\"{settings.API_V1_STR}/login/access-token\", data=login_data)\n tokens = r.json()\n a_token = tokens[\"access_token\"]\n headers = {\"Authorization\": f\"Bearer {a_token}\"}\n return headers\n" ]
[ [ "numpy.linalg.norm", "numpy.random.default_rng" ] ]
jiang1997/mmcv
[ "571e3e5fc75c23b45cbd9b00011af094357c5f1d" ]
[ "mmcv/runner/fp16_utils.py" ]
[ "import functools\nimport warnings\nfrom collections import abc\nfrom inspect import getfullargspec\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom mmcv.utils import TORCH_VERSION, digit_version\nfrom .dist_utils import allreduce_grads as _allreduce_grads\n\ntry:\n # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported\n # and used; otherwise, auto fp16 will adopt mmcv's implementation.\n # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16\n # manually, so the behavior may not be consistant with real amp.\n from torch.cuda.amp import autocast\nexcept ImportError:\n pass\n\n\ndef cast_tensor_type(inputs, src_type, dst_type):\n \"\"\"Recursively convert Tensor in inputs from src_type to dst_type.\n\n Args:\n inputs: Inputs that to be casted.\n src_type (torch.dtype): Source type..\n dst_type (torch.dtype): Destination type.\n\n Returns:\n The same type with inputs, but all contained Tensors have been cast.\n \"\"\"\n if isinstance(inputs, nn.Module):\n return inputs\n elif isinstance(inputs, torch.Tensor):\n return inputs.to(dst_type)\n elif isinstance(inputs, str):\n return inputs\n elif isinstance(inputs, np.ndarray):\n return inputs\n elif isinstance(inputs, abc.Mapping):\n return type(inputs)({\n k: cast_tensor_type(v, src_type, dst_type)\n for k, v in inputs.items()\n })\n elif isinstance(inputs, abc.Iterable):\n return type(inputs)(\n cast_tensor_type(item, src_type, dst_type) for item in inputs)\n else:\n return inputs\n\n\ndef auto_fp16(apply_to=None, out_fp32=False):\n \"\"\"Decorator to enable fp16 training automatically.\n\n This decorator is useful when you write custom modules and want to support\n mixed precision training. If inputs arguments are fp32 tensors, they will\n be converted to fp16 automatically. Arguments other than fp32 tensors are\n ignored. 
If you are using PyTorch >= 1.6, torch.cuda.amp is used as the\n backend, otherwise, original mmcv implementation will be adopted.\n\n Args:\n apply_to (Iterable, optional): The argument names to be converted.\n `None` indicates all arguments.\n out_fp32 (bool): Whether to convert the output back to fp32.\n\n Example:\n\n >>> import torch.nn as nn\n >>> class MyModule1(nn.Module):\n >>>\n >>> # Convert x and y to fp16\n >>> @auto_fp16()\n >>> def forward(self, x, y):\n >>> pass\n\n >>> import torch.nn as nn\n >>> class MyModule2(nn.Module):\n >>>\n >>> # convert pred to fp16\n >>> @auto_fp16(apply_to=('pred', ))\n >>> def do_something(self, pred, others):\n >>> pass\n \"\"\"\n\n def auto_fp16_wrapper(old_func):\n\n @functools.wraps(old_func)\n def new_func(*args, **kwargs):\n # check if the module has set the attribute `fp16_enabled`, if not,\n # just fallback to the original method.\n if not isinstance(args[0], torch.nn.Module):\n raise TypeError('@auto_fp16 can only be used to decorate the '\n 'method of nn.Module')\n if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):\n return old_func(*args, **kwargs)\n\n # get the arg spec of the decorated method\n args_info = getfullargspec(old_func)\n # get the argument names to be casted\n args_to_cast = args_info.args if apply_to is None else apply_to\n # convert the args that need to be processed\n new_args = []\n # NOTE: default args are not taken into consideration\n if args:\n arg_names = args_info.args[:len(args)]\n for i, arg_name in enumerate(arg_names):\n if arg_name in args_to_cast:\n new_args.append(\n cast_tensor_type(args[i], torch.float, torch.half))\n else:\n new_args.append(args[i])\n # convert the kwargs that need to be processed\n new_kwargs = {}\n if kwargs:\n for arg_name, arg_value in kwargs.items():\n if arg_name in args_to_cast:\n new_kwargs[arg_name] = cast_tensor_type(\n arg_value, torch.float, torch.half)\n else:\n new_kwargs[arg_name] = arg_value\n # apply converted arguments to the decorated method\n if (TORCH_VERSION != 'parrots' and\n digit_version(TORCH_VERSION) >= digit_version('1.6.0')):\n with autocast(enabled=True):\n output = old_func(*new_args, **new_kwargs)\n else:\n output = old_func(*new_args, **new_kwargs)\n # cast the results back to fp32 if necessary\n if out_fp32:\n output = cast_tensor_type(output, torch.half, torch.float)\n return output\n\n return new_func\n\n return auto_fp16_wrapper\n\n\ndef force_fp32(apply_to=None, out_fp16=False):\n \"\"\"Decorator to convert input arguments to fp32 in force.\n\n This decorator is useful when you write custom modules and want to support\n mixed precision training. If there are some inputs that must be processed\n in fp32 mode, then this decorator can handle it. If inputs arguments are\n fp16 tensors, they will be converted to fp32 automatically. Arguments other\n than fp16 tensors are ignored. 
If you are using PyTorch >= 1.6,\n torch.cuda.amp is used as the backend, otherwise, original mmcv\n implementation will be adopted.\n\n Args:\n apply_to (Iterable, optional): The argument names to be converted.\n `None` indicates all arguments.\n out_fp16 (bool): Whether to convert the output back to fp16.\n\n Example:\n\n >>> import torch.nn as nn\n >>> class MyModule1(nn.Module):\n >>>\n >>> # Convert x and y to fp32\n >>> @force_fp32()\n >>> def loss(self, x, y):\n >>> pass\n\n >>> import torch.nn as nn\n >>> class MyModule2(nn.Module):\n >>>\n >>> # convert pred to fp32\n >>> @force_fp32(apply_to=('pred', ))\n >>> def post_process(self, pred, others):\n >>> pass\n \"\"\"\n\n def force_fp32_wrapper(old_func):\n\n @functools.wraps(old_func)\n def new_func(*args, **kwargs):\n # check if the module has set the attribute `fp16_enabled`, if not,\n # just fallback to the original method.\n if not isinstance(args[0], torch.nn.Module):\n raise TypeError('@force_fp32 can only be used to decorate the '\n 'method of nn.Module')\n if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):\n return old_func(*args, **kwargs)\n # get the arg spec of the decorated method\n args_info = getfullargspec(old_func)\n # get the argument names to be casted\n args_to_cast = args_info.args if apply_to is None else apply_to\n # convert the args that need to be processed\n new_args = []\n if args:\n arg_names = args_info.args[:len(args)]\n for i, arg_name in enumerate(arg_names):\n if arg_name in args_to_cast:\n new_args.append(\n cast_tensor_type(args[i], torch.half, torch.float))\n else:\n new_args.append(args[i])\n # convert the kwargs that need to be processed\n new_kwargs = dict()\n if kwargs:\n for arg_name, arg_value in kwargs.items():\n if arg_name in args_to_cast:\n new_kwargs[arg_name] = cast_tensor_type(\n arg_value, torch.half, torch.float)\n else:\n new_kwargs[arg_name] = arg_value\n # apply converted arguments to the decorated method\n if (TORCH_VERSION != 'parrots' and\n digit_version(TORCH_VERSION) >= digit_version('1.6.0')):\n with autocast(enabled=False):\n output = old_func(*new_args, **new_kwargs)\n else:\n output = old_func(*new_args, **new_kwargs)\n # cast the results back to fp32 if necessary\n if out_fp16:\n output = cast_tensor_type(output, torch.float, torch.half)\n return output\n\n return new_func\n\n return force_fp32_wrapper\n\n\ndef allreduce_grads(params, coalesce=True, bucket_size_mb=-1):\n warnings.warning(\n '\"mmcv.runner.fp16_utils.allreduce_grads\" is deprecated, and will be '\n 'removed in v2.8. Please switch to \"mmcv.runner.allreduce_grads')\n _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)\n\n\ndef wrap_fp16_model(model):\n \"\"\"Wrap the FP32 model to FP16.\n\n If you are using PyTorch >= 1.6, torch.cuda.amp is used as the\n backend, otherwise, original mmcv implementation will be adopted.\n\n For PyTorch >= 1.6, this function will\n 1. Set fp16 flag inside the model to True.\n\n Otherwise:\n 1. Convert FP32 model to FP16.\n 2. Remain some necessary layers to be FP32, e.g., normalization layers.\n 3. 
Set `fp16_enabled` flag inside the model to True.\n\n Args:\n model (nn.Module): Model in FP32.\n \"\"\"\n if (TORCH_VERSION == 'parrots'\n or digit_version(TORCH_VERSION) < digit_version('1.6.0')):\n # convert model to fp16\n model.half()\n # patch the normalization layers to make it work in fp32 mode\n patch_norm_fp32(model)\n # set `fp16_enabled` flag\n for m in model.modules():\n if hasattr(m, 'fp16_enabled'):\n m.fp16_enabled = True\n\n\ndef patch_norm_fp32(module):\n \"\"\"Recursively convert normalization layers from FP16 to FP32.\n\n Args:\n module (nn.Module): The modules to be converted in FP16.\n\n Returns:\n nn.Module: The converted module, the normalization layers have been\n converted to FP32.\n \"\"\"\n if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):\n module.float()\n if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':\n module.forward = patch_forward_method(module.forward, torch.half,\n torch.float)\n for child in module.children():\n patch_norm_fp32(child)\n return module\n\n\ndef patch_forward_method(func, src_type, dst_type, convert_output=True):\n \"\"\"Patch the forward method of a module.\n\n Args:\n func (callable): The original forward method.\n src_type (torch.dtype): Type of input arguments to be converted from.\n dst_type (torch.dtype): Type of input arguments to be converted to.\n convert_output (bool): Whether to convert the output back to src_type.\n\n Returns:\n callable: The patched forward method.\n \"\"\"\n\n def new_forward(*args, **kwargs):\n output = func(*cast_tensor_type(args, src_type, dst_type),\n **cast_tensor_type(kwargs, src_type, dst_type))\n if convert_output:\n output = cast_tensor_type(output, dst_type, src_type)\n return output\n\n return new_forward\n\n\nclass LossScaler:\n \"\"\"Class that manages loss scaling in mixed precision training which\n supports both dynamic or static mode.\n\n The implementation refers to\n https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py.\n Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling.\n It's important to understand how :class:`LossScaler` operates.\n Loss scaling is designed to combat the problem of underflowing\n gradients encountered at long times when training fp16 networks.\n Dynamic loss scaling begins by attempting a very high loss\n scale. Ironically, this may result in OVERflowing gradients.\n If overflowing gradients are encountered, :class:`FP16_Optimizer` then\n skips the update step for this particular iteration/minibatch,\n and :class:`LossScaler` adjusts the loss scale to a lower value.\n If a certain number of iterations occur without overflowing gradients\n detected,:class:`LossScaler` increases the loss scale once more.\n In this way :class:`LossScaler` attempts to \"ride the edge\" of always\n using the highest loss scale possible without incurring overflow.\n\n Args:\n init_scale (float): Initial loss scale value, default: 2**32.\n scale_factor (float): Factor used when adjusting the loss scale.\n Default: 2.\n mode (str): Loss scaling mode. 'dynamic' or 'static'\n scale_window (int): Number of consecutive iterations without an\n overflow to wait before increasing the loss scale. 
Default: 1000.\n \"\"\"\n\n def __init__(self,\n init_scale=2**32,\n mode='dynamic',\n scale_factor=2.,\n scale_window=1000):\n self.cur_scale = init_scale\n self.cur_iter = 0\n assert mode in ('dynamic',\n 'static'), 'mode can only be dynamic or static'\n self.mode = mode\n self.last_overflow_iter = -1\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n\n def has_overflow(self, params):\n \"\"\"Check if params contain overflow.\"\"\"\n if self.mode != 'dynamic':\n return False\n for p in params:\n if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data):\n return True\n return False\n\n def _has_inf_or_nan(x):\n \"\"\"Check if params contain NaN.\"\"\"\n try:\n cpu_sum = float(x.float().sum())\n except RuntimeError as instance:\n if 'value cannot be converted' not in instance.args[0]:\n raise\n return True\n else:\n if cpu_sum == float('inf') or cpu_sum == -float('inf') \\\n or cpu_sum != cpu_sum:\n return True\n return False\n\n def update_scale(self, overflow):\n \"\"\"update the current loss scale value when overflow happens.\"\"\"\n if self.mode != 'dynamic':\n return\n if overflow:\n self.cur_scale = max(self.cur_scale / self.scale_factor, 1)\n self.last_overflow_iter = self.cur_iter\n else:\n if (self.cur_iter - self.last_overflow_iter) % \\\n self.scale_window == 0:\n self.cur_scale *= self.scale_factor\n self.cur_iter += 1\n\n def state_dict(self):\n \"\"\"Returns the state of the scaler as a :class:`dict`.\"\"\"\n return dict(\n cur_scale=self.cur_scale,\n cur_iter=self.cur_iter,\n mode=self.mode,\n last_overflow_iter=self.last_overflow_iter,\n scale_factor=self.scale_factor,\n scale_window=self.scale_window)\n\n def load_state_dict(self, state_dict):\n \"\"\"Loads the loss_scaler state dict.\n\n Args:\n state_dict (dict): scaler state.\n \"\"\"\n self.cur_scale = state_dict['cur_scale']\n self.cur_iter = state_dict['cur_iter']\n self.mode = state_dict['mode']\n self.last_overflow_iter = state_dict['last_overflow_iter']\n self.scale_factor = state_dict['scale_factor']\n self.scale_window = state_dict['scale_window']\n\n @property\n def loss_scale(self):\n return self.cur_scale\n" ]
[ [ "torch.cuda.amp.autocast" ] ]
digitalinteraction/openmovement-python
[ "d07dcd66d5c402436ee1113ed2862f6c41aa61b1" ]
[ "src/openmovement/load/csv_load.py" ]
[ "import csv\nimport re\nimport datetime\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom openmovement.load.base_data import BaseData\n\n\n# Normalize the column labels (e.g. 'Time' to 'time'; 'Accel-X (g)' to 'accel_x'; 'Gyro-Z (d/s)' to 'gyro_z')\ndef _normalize_label(label):\n if label is None:\n return None # Or '' ?\n \n # Remove any strings in parentheses (e.g. units)\n reRemoveBracketed = re.compile('\\(.*?\\)')\n label = reRemoveBracketed.sub('', label)\n\n # Remove any multiple spaces\n reRemoveMultipleSpace = re.compile(' +')\n label = reRemoveMultipleSpace.sub(' ', label)\n\n # Remove any leading/trailing spaces\n label = label.strip()\n\n # Substitute spaces and hyphens with an underscore\n label = label.replace(' ', '_')\n label = label.replace('-', '_')\n\n # Enforce lower case\n label = label.lower()\n\n return label\n\n\n# Convert a timestamp-with-no-timezone into a datetime (using UTC even though unknown zone, alternative is naive datetime which is assumed to be in the current local computer's time)\ndef _csv_datetime(timestamp):\n return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)\n\n\n# Convert a timestamp-with-no-timezone into a ISO-ish string representation (using UTC even though unknown zone, alternative is naive datetime which is assumed to be in the current local computer's time)\ndef _csv_datetime_string(time):\n if not isinstance(time, datetime.datetime):\n time = _csv_datetime(time)\n return time.isoformat(sep=' ')[0:19]\n\n\n# Convert a timestamp-with-no-timezone into a ISO-ish string representation with milliseconds (using UTC even though unknown zone, alternative is naive datetime which is assumed to be in the current local computer's time)\ndef _csv_datetime_ms_string(time):\n if not isinstance(time, datetime.datetime):\n time = _csv_datetime(time)\n return time.isoformat(sep=' ',timespec='milliseconds')[0:23]\n\n\nclass CsvData(BaseData):\n \"\"\"\n Timeseries .CSV data.\n The first row can contain column headers.\n If there is a timestamp, it must be in the first column.\n If the timestamp is an ISO-ish date/time string, it is parsed as a time since the 1970 epoch date (in seconds, but as a datetime64[ns] by default for pandas).\n If the timestamp is numeric, the 'start_time' option may be added as an offset to apply in seconds.\n No timezone information is known (can treat as a UTC time to correctly recover the local date and time of day).\n All other columns must be numeric.\n \"\"\"\n\n def _read_data(self):\n if self.verbose: print('Opening CSV file...', flush=True)\n self.fh = open(self.filename, 'rb')\n try:\n import mmap\n self.full_buffer = mmap.mmap(self.fh.fileno(), 0, access=mmap.ACCESS_READ)\n if self.verbose: print('...mapped ' + str(len(self.full_buffer) / 1024 / 1024) + 'MB', flush=True)\n except Exception as e:\n print('WARNING: Problem using mmap (' + str(e) +') - falling back to reading whole file...', flush=True)\n self.full_buffer = self.fh.read()\n if self.verbose: print('...read ' + str(len(self.full_buffer) / 1024 / 1024) + 'MB', flush=True)\n\n def _parse_header(self):\n if self.verbose: print('Parsing header...', flush=True)\n\n # Take an initial chunk of data to inspect\n initial_chunk = self.full_buffer[0:4096].decode(encoding='utf-8')\n if len(initial_chunk) == 0:\n raise Exception('File has no data')\n initial_lines = initial_chunk.splitlines()\n if len(initial_lines) < 2:\n raise Exception('File has insufficient data (or initial header/row too long)')\n\n # Open to inspect header and 
data format\n sniffer = csv.Sniffer()\n dialect = sniffer.sniff(initial_chunk)\n\n # Process the first CSV row\n self.header = None\n csv_reader = csv.reader(initial_lines, dialect) # quoting=csv.QUOTE_NONNUMERIC\n if sniffer.has_header(initial_chunk):\n self.header = next(csv_reader)\n try:\n first_row = next(csv_reader)\n except StopIteration:\n first_row = []\n\n # Number of columns\n self.num_columns = len(first_row)\n\n # If no header, create a numerically-labelled header\n if self.header is None:\n self.has_header = False\n self.header = list(map(str, list(range(0, self.num_columns))))\n else:\n self.has_header = True\n\n # Derive labels from header\n self.labels = list(map(_normalize_label, self.header))\n\n # RegExp to match a formatted absolute date/time (an optional 'T', optional fractions of a second, optional 'Z' or timezone offset)\n date_time_re = re.compile('^\\d\\d\\d\\d-\\d\\d-\\d\\d[T ]\\d\\d:\\d\\d:\\d\\d(?:\\.\\d+)?(?:Z|[-+]\\d\\d:\\d\\d)?$')\n\n # Decide the type of timestamps we have based on the column heading or data format\n if self.num_columns > 0 and date_time_re.match(first_row[0]):\n self.timestamps_absolute = True # Timestamps are absolute date/times\n if self.verbose: print('Timestamps: absolute')\n elif len(self.header) > 0 and (_normalize_label(self.header[0]) == 'time' or self.force_time):\n self.timestamps_absolute = False # Timestamps are numeric\n if self.verbose: print('Timestamps: numeric')\n else:\n self.timestamps_absolute = None # Timestamps are missing\n if self.verbose: print('Timestamps: none')\n \n # Use a standard label for first column\n if self.timestamps_absolute is not None:\n self.labels[0] = 'time'\n\n\n def _parse_data(self):\n if self.timestamps_absolute == True:\n if self.verbose: print('Parsing data (timestamps)...', flush=True)\n # Read timestamped data with Pandas (slightly faster than numpy)\n pd_data = pd.read_csv(\n self.full_buffer, \n parse_dates=[0], # parse_dates=['date_utc'], \n infer_datetime_format=True, \n sep=',', \n usecols=list(range(0, self.num_columns)),\n header=None, # We've already inspected the headers\n skiprows=[0] if self.has_header else [],\n names=self.labels,\n )\n # Standardized to create a single ndarray -- convert from datetime64[ns] to time since epoch in seconds\n pd_data.iloc[:,0] = pd.to_numeric(pd_data.iloc[:,0]) / 1e9\n else:\n if self.verbose: print('Parsing data (non/numeric timestamps)...', flush=True)\n # Read numeric or non-timestamped data with Pandas\n pd_data = pd.read_csv(\n self.full_buffer, \n infer_datetime_format=True, \n sep=',', \n usecols=list(range(0, self.num_columns)),\n header=None, # We've already inspected the headers\n skiprows=[0] if self.has_header else [],\n names=self.labels,\n )\n\n self.sample_values = pd_data.to_numpy()\n\n \n def _interpret_samples(self):\n if self.verbose: print('Interpreting samples...', flush=True)\n\n # If we don't have any timestamps, but do have an assumed frequency, synthesize relative timestamps\n if self.timestamps_absolute is None and self.assumed_frequency is not None:\n if self.verbose: print('Timestamps: synthesize->numeric')\n timestamps = np.arange(self.sample_values.shape[0]) / self.assumed_frequency\n self.sample_values = np.insert(self.sample_values, 0, timestamps, axis=1)\n self.timestamps_absolute = False\n\n # Where timestamps are relative, add any supplied start time\n if self.timestamps_absolute == False and self.start_time != 0:\n if self.verbose: print('Timestamps: numeric + offset')\n self.sample_values[:,0] += 
self.start_time\n\n # Start by taking the assumed frequency\n self.frequency = self.assumed_frequency\n\n # Where possible, estimate the sample frequency from the timestamps\n # Can't assume that the data is uninterrupted, so not just the inverse of the mean frequency from the overall duration divided by number of samples\n if self.timestamps_absolute is not None and self.sample_values.shape[0] > 1:\n # Consider the timestamps (in seconds)\n timestamps = self.sample_values[:,0]\n # Sample N pairs of adjacent times linearly throughout the data\n num_pairs = 100\n first_index = 0\n last_index = timestamps.shape[0] - 1 # each index must form a pair with the subsequent one\n sample_index = (np.arange(0, num_pairs) * ((last_index - first_index) / num_pairs) + first_index).astype(int)\n # Calculate the interval between subsequent indexes\n intervals = timestamps[sample_index + 1] - timestamps[sample_index]\n # Take the median value as the interval\n median_interval = np.median(intervals)\n # The frequency estimate is the inverse\n self.frequency = round(1.0 / median_interval, 0)\n\n if self.verbose: print('Frequency estimate: ' + str(self.frequency))\n\n\n def __init__(self, filename, verbose=False, force_time=True, start_time=0, assumed_frequency=None):\n \"\"\"\n :param filename: The path to the .CSV file\n :param verbose: Output more detailed information.\n :param force_time: First column to be treated as time even if it doesn't look like an absolute timestamp and doesn't have a column header similar to 'time'.\n :param start_time: Seconds since the epoch to use as an initial time to use for relative numeric (rather than absolute) timestamps, or where the time is missing.\n :param assumed_frequency: Sampling frequency to assume if no timestamps are given.\n \"\"\"\n super().__init__(filename, verbose)\n self.force_time = force_time\n self.start_time = start_time\n self.assumed_frequency = assumed_frequency\n\n self.all_data_read = False\n\n self._read_data()\n self._parse_header()\n if self.verbose: print('...initialization done.', flush=True)\n\n\n # Current model reads all of the data in one go (and releases the file)\n def _ensure_all_data_read(self):\n start_time = time.time()\n if self.all_data_read:\n return\n self.all_data_read = True\n self._parse_data()\n self._interpret_samples()\n\n elapsed_time = time.time() - start_time\n if self.verbose: print('Read done... (elapsed=' + str(elapsed_time) + ')', flush=True)\n self.close()\n\n def close(self):\n \"\"\"Close the underlying file. Automatically closed in with() block or when GC'd.\"\"\"\n if hasattr(self, 'full_buffer') and self.full_buffer is not None:\n # Close if a mmap()\n if hasattr(self.full_buffer, 'close'):\n self.full_buffer.close()\n # Delete buffer (if large allocation not using mmap)\n del self.full_buffer\n self.full_buffer = None\n if hasattr(self, 'fh') and self.fh is not None:\n self.fh.close()\n self.fh = None\n\n def get_sample_values(self):\n \"\"\"\n Get the sample values as a single ndarray.\n\n :returns: An ndarray of the read data, e.g. (time, accel_x, accel_y, accel_z),\n where 'time' is normalized to seconds since the epoch if from timestamps.\n \"\"\"\n self._ensure_all_data_read()\n return self.sample_values\n\n def get_samples(self, use_datetime64=True):\n \"\"\"\n Return an DataFrame, e.g. 
(time, accel_x, accel_y, accel_z)\n\n :param use_datetime64: (Default) time is in datetime64[ns]; otherwise in seconds since the epoch.\n \"\"\"\n self._ensure_all_data_read()\n if self.timestamps_absolute is not None and use_datetime64:\n if self.verbose: print('Converting time...', flush=True)\n # Samples exclude the current time (float seconds) column\n samples = pd.DataFrame(self.sample_values[:,1:], columns=self.labels[1:])\n # Convert the float epoch time in seconds to a datetime64 integer in nanoseconds (Pandas default)\n time = (self.sample_values[:,0] * 1_000_000_000).astype('datetime64[ns]')\n # Add time as first column\n samples.insert(0, self.labels[0], time, True)\n if self.verbose: print('...done', flush=True)\n else:\n # Keep time (if used) in seconds\n samples = pd.DataFrame(self.sample_values, columns=self.labels)\n\n # Add sample metadata (start time in seconds since epoch, and configured sample frequency)\n samples.attrs['time'] = self.get_start_time()\n samples.attrs['fs'] = self.get_sample_rate()\n return samples\n\n # Time of first sample (seconds since epoch)\n def get_start_time(self):\n self._ensure_all_data_read()\n # For non-timestamped data, start at the given origin\n if self.timestamps_absolute is None:\n return self.start_time\n # Otherwise, the time of the first sample\n return self.sample_values[0,0]\n\n def get_sample_rate(self):\n self._ensure_all_data_read()\n return self.frequency\n\n def get_num_samples(self):\n self._ensure_all_data_read()\n return self.sample_values.shape[0]\n\n\n\ndef main():\n filename = '../_local/data/sample.csv'\n #filename = '../../../_local/data/sample.csv'\n #filename = '../../../_local/data/mixed_wear.csv'\n\n with CsvData(filename, verbose=True) as csv_data:\n sample_values = csv_data.get_sample_values()\n samples = csv_data.get_samples()\n pass\n\n print(sample_values)\n print(samples)\n\n print('Done')\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.arange", "numpy.median", "pandas.DataFrame", "numpy.insert", "pandas.to_numeric" ] ]
mcharsley/google-cloud-cpp
[ "934b05112555e661035930c10ebc0a8011d519ac" ]
[ "google/cloud/storage/benchmarks/storage_throughput_plots.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Summarize the results from running storage_throughput_benchmark.\"\"\"\n\n# %%\nimport argparse\nimport pandas as pd\nimport plotnine as p9\nfrom scipy.stats import mannwhitneyu\n\n\n# %%\npd.set_option('precision', 2)\n\n\n# %%\ndef load_benchmark_output(file):\n \"\"\"Loads the output generated by storage_throughput_benchmark.\"\"\"\n df = pd.read_csv(file, comment='#', names=['Op', 'Api', 'Bytes', 'ElapsedMs'])\n df['MiB'] = df.Bytes / 1024 / 1024\n df['MiBs'] = df.MiB * 1000 / df.ElapsedMs\n return df\n\n\n# %%\ndef compare_api(df, op_name, alpha=0.05):\n subset = df[df.Op == op_name]\n stat, p = mannwhitneyu(subset[subset.Api == 'XML'].MiBs, subset[subset.Api == 'JSON'].MiBs)\n print('\\n\\n===== %s XML vs. JSON =====\\np-value=%.3f Statistics=%.3f' % (op_name, p, stat))\n print(subset.groupby(by='Api').MiBs.describe(percentiles=[.50, .90, .95]))\n\n if p > alpha:\n print('%s/XML vs. READ/JSON: same distribution (fail to reject H0)' % op_name)\n else:\n print('%s/XML vs. READ/JSON: different distribution (reject H0)' % op_name)\n\n\n# %%\nparser = argparse.ArgumentParser()\nparser.add_argument('--input-file', type=argparse.FileType('r'), required=True,\n help='the benchmark output file to load')\nparser.add_argument('--output-file', type=str, required=True,\n help='the name for the output plot')\nargs = parser.parse_args()\n\n# %%\ndata = load_benchmark_output(args.input_file)\n\n# %%\nprint(data.head())\n\n# %%\nprint(data.describe())\n\n# %%\n(p9.ggplot(data=data[(data.Op != 'CREATE') & (data.Op != 'DELETE')],\n mapping=p9.aes(x='Op', y='MiBs', color='Api'))\n + p9.facet_wrap(facets='Op', labeller='label_both', scales='free')\n + p9.geom_boxplot()).save(args.output_file)\n\n# %%\ncompare_api(data, 'READ')\ncompare_api(data, 'WRITE')\n" ]
[ [ "scipy.stats.mannwhitneyu", "pandas.set_option", "pandas.read_csv" ] ]
1617226214/hybrid_space
[ "2ad8946d44fb61c72787f10f4596a08ca80b5960" ]
[ "tester.py" ]
[ "import os\r\nimport sys\r\nimport json\r\nimport torch\r\nimport pickle\r\nimport logging\r\nimport argparse\r\n\r\nimport evaluation\r\nfrom model import get_model\r\nfrom validate import norm_score, cal_perf\r\n\r\nimport util.data_provider as data\r\nfrom util.text2vec import get_text_encoder\r\nimport util.metrics as metrics\r\nfrom util.vocab import Vocabulary\r\n\r\nfrom basic.util import read_dict, log_config\r\nfrom basic.constant import ROOT_PATH\r\nfrom basic.bigfile import BigFile\r\nfrom basic.common import makedirsforfile, checkToSkip\r\n\r\ndef parse_args():\r\n # Hyper Parameters\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--testCollection', type=str, help='test collection')\r\n parser.add_argument('--rootpath', type=str, default=ROOT_PATH, help='path to datasets. (default: %s)'%ROOT_PATH)\r\n parser.add_argument('--overwrite', type=int, default=0, choices=[0,1], help='overwrite existed file. (default: 0)')\r\n parser.add_argument('--log_step', default=100, type=int, help='Number of steps to print and record the log.')\r\n parser.add_argument('--batch_size', default=128, type=int, help='Size of a training mini-batch.')\r\n parser.add_argument('--workers', default=5, type=int, help='Number of data loader workers.')\r\n parser.add_argument('--logger_name', default='runs', help='Path to save the model and Tensorboard log.')\r\n parser.add_argument('--checkpoint_name', default='model_best.pth.tar', type=str, help='name of checkpoint (default: model_best.pth.tar)')\r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\ndef main():\r\n opt = parse_args()\r\n print(json.dumps(vars(opt), indent=2))\r\n\r\n rootpath = opt.rootpath\r\n # n_caption = opt.n_caption\r\n resume = os.path.join(opt.logger_name, opt.checkpoint_name)\r\n\r\n if not os.path.exists(resume):\r\n logging.info(resume + ' not exists.')\r\n sys.exit(0)\r\n\r\n checkpoint = torch.load(resume)\r\n start_epoch = checkpoint['epoch']\r\n best_rsum = checkpoint['best_rsum']\r\n print(\"=> loaded checkpoint '{}' (epoch {}, best_rsum {})\"\r\n .format(resume, start_epoch, best_rsum))\r\n options = checkpoint['opt']\r\n\r\n\r\n if opt.testCollection is None:\r\n testCollection = options.testCollection\r\n collections_pathname = options.collections_pathname\r\n else:\r\n testCollection = opt.testCollection\r\n collections_pathname = options.collections_pathname\r\n collections_pathname['test'] = testCollection\r\n\r\n\r\n trainCollection = options.trainCollection\r\n output_dir = resume.replace(trainCollection, testCollection)\r\n if 'checkpoints' in output_dir:\r\n output_dir = output_dir.replace('/checkpoints/', '/results/')\r\n else:\r\n output_dir = output_dir.replace('/%s/' % options.cv_name, '/results/%s/%s/' % (options.cv_name, trainCollection))\r\n result_pred_sents = os.path.join(output_dir, 'id.sent.score.txt')\r\n pred_error_matrix_file = os.path.join(output_dir, 'pred_errors_matrix.pth.tar')\r\n if checkToSkip(pred_error_matrix_file, opt.overwrite):\r\n sys.exit(0)\r\n makedirsforfile(pred_error_matrix_file)\r\n\r\n log_config(output_dir)\r\n logging.info(json.dumps(vars(opt), indent=2))\r\n\r\n # data loader prepare\r\n test_cap = os.path.join(rootpath, collections_pathname['test'], 'TextData', '%s.caption.txt'%testCollection)\r\n if not os.path.exists(test_cap):\r\n test_cap = os.path.join(rootpath, collections_pathname['test'], 'TextData', '%stest.caption.txt'%testCollection)\r\n caption_files = {'test': test_cap}\r\n img_feat_path = os.path.join(rootpath, 
collections_pathname['test'], 'FeatureData', options.visual_feature)\r\n visual_feats = {'test': BigFile(img_feat_path)}\r\n assert options.visual_feat_dim == visual_feats['test'].ndims\r\n video2frames = {'test': read_dict(os.path.join(rootpath, collections_pathname['test'], 'FeatureData', options.visual_feature, 'video2frames.txt'))}\r\n\r\n # set bow vocabulary and encoding\r\n bow_vocab_file = os.path.join(rootpath, collections_pathname['train'], 'TextData', 'vocabulary', 'bow', options.vocab+'.pkl')\r\n bow_vocab = pickle.load(open(bow_vocab_file, 'rb'))\r\n bow2vec = get_text_encoder('bow')(bow_vocab)\r\n options.bow_vocab_size = len(bow_vocab)\r\n\r\n # set rnn vocabulary \r\n rnn_vocab_file = os.path.join(rootpath, collections_pathname['train'], 'TextData', 'vocabulary', 'rnn', options.vocab+'.pkl')\r\n rnn_vocab = pickle.load(open(rnn_vocab_file, 'rb'))\r\n options.vocab_size = len(rnn_vocab)\r\n\r\n # Construct the model\r\n model = get_model(options.model)(options)\r\n model.load_state_dict(checkpoint['model'])\r\n model.Eiters = checkpoint['Eiters']\r\n model.val_start()\r\n\r\n # set data loader\r\n video_ids_list = data.read_video_ids(caption_files['test'])\r\n vid_data_loader = data.get_vis_data_loader(visual_feats['test'], opt.batch_size, opt.workers, video2frames['test'], video_ids=video_ids_list)\r\n text_data_loader = data.get_txt_data_loader(caption_files['test'], rnn_vocab, bow2vec, opt.batch_size, opt.workers)\r\n\r\n # mapping\r\n if options.space == 'hybrid':\r\n video_embs, video_tag_probs, video_ids = evaluation.encode_text_or_vid_tag_hist_prob(model.embed_vis, vid_data_loader)\r\n cap_embs, cap_tag_probs, caption_ids = evaluation.encode_text_or_vid_tag_hist_prob(model.embed_txt, text_data_loader)\r\n else:\r\n video_embs, video_ids = evaluation.encode_text_or_vid(model.embed_vis, vid_data_loader)\r\n cap_embs, caption_ids = evaluation.encode_text_or_vid(model.embed_txt, text_data_loader)\r\n\r\n\r\n v2t_gt, t2v_gt = metrics.get_gt(video_ids, caption_ids)\r\n\r\n logging.info(\"write into: %s\" % output_dir)\r\n if options.space != 'latent':\r\n tag_vocab_path = os.path.join(rootpath, collections_pathname['train'], 'TextData', 'tags', 'video_label_th_1', 'tag_vocab_%d.json' % options.tag_vocab_size)\r\n evaluation.pred_tag(video_tag_probs, video_ids, tag_vocab_path, os.path.join(output_dir, 'video'))\r\n evaluation.pred_tag(cap_tag_probs, caption_ids, tag_vocab_path, os.path.join(output_dir, 'text'))\r\n \r\n if options.space in ['latent', 'hybrid']:\r\n # logging.info(\"=======Latent Space=======\")\r\n t2v_all_errors_1 = evaluation.cal_error(video_embs, cap_embs, options.measure)\r\n\r\n if options.space in ['concept', 'hybrid']:\r\n # logging.info(\"=======Concept Space=======\")\r\n t2v_all_errors_2 = evaluation.cal_error_batch(video_tag_probs, cap_tag_probs, options.measure_2)\r\n \r\n if options.space in ['hybrid']:\r\n w = 0.6\r\n t2v_all_errors_1 = norm_score(t2v_all_errors_1)\r\n t2v_all_errors_2 = norm_score(t2v_all_errors_2)\r\n t2v_tag_all_errors = w*t2v_all_errors_1 + (1-w)*t2v_all_errors_2\r\n cal_perf(t2v_tag_all_errors, v2t_gt, t2v_gt)\r\n torch.save({'errors': t2v_tag_all_errors, 'videos': video_ids, 'captions': caption_ids}, pred_error_matrix_file) \r\n logging.info(\"write into: %s\" % pred_error_matrix_file)\r\n\r\n elif options.space in ['latent']:\r\n cal_perf(t2v_all_errors_1, v2t_gt, t2v_gt)\r\n torch.save({'errors': t2v_all_errors_1, 'videos': video_ids, 'captions': caption_ids}, pred_error_matrix_file) \r\n logging.info(\"write into: %s\" 
% pred_error_matrix_file)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "torch.save", "torch.load" ] ]
WDZRMPCBIT/SCoRE
[ "c426e58c253f5d97fc4ad0e0fea9606f70cff872", "c426e58c253f5d97fc4ad0e0fea9606f70cff872", "c426e58c253f5d97fc4ad0e0fea9606f70cff872" ]
[ "sqa/pytorch_transformers/convert_pytorch_checkpoint_to_tf.py", "sqa/pytorch_transformers/convert_transfo_xl_checkpoint_to_pytorch.py", "sqa/pytorch_transformers/modeling_roberta.py" ]
[ "# coding=utf-8\r\n# Copyright 2018 The HuggingFace Inc. team.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint.\"\"\"\r\n\r\nimport os\r\nimport argparse\r\nimport torch\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom pytorch_transformers.modeling import BertModel\r\n\r\n\r\ndef convert_pytorch_checkpoint_to_tf(model:BertModel, ckpt_dir:str, model_name:str):\r\n\r\n \"\"\"\r\n :param model:BertModel Pytorch model instance to be converted\r\n :param ckpt_dir: Tensorflow model directory\r\n :param model_name: model name\r\n :return:\r\n\r\n Currently supported HF models:\r\n Y BertModel\r\n N BertForMaskedLM\r\n N BertForPreTraining\r\n N BertForMultipleChoice\r\n N BertForNextSentencePrediction\r\n N BertForSequenceClassification\r\n N BertForQuestionAnswering\r\n \"\"\"\r\n\r\n tensors_to_transpose = (\r\n \"dense.weight\",\r\n \"attention.self.query\",\r\n \"attention.self.key\",\r\n \"attention.self.value\"\r\n )\r\n\r\n var_map = (\r\n ('layer.', 'layer_'),\r\n ('word_embeddings.weight', 'word_embeddings'),\r\n ('position_embeddings.weight', 'position_embeddings'),\r\n ('token_type_embeddings.weight', 'token_type_embeddings'),\r\n ('.', '/'),\r\n ('LayerNorm/weight', 'LayerNorm/gamma'),\r\n ('LayerNorm/bias', 'LayerNorm/beta'),\r\n ('weight', 'kernel')\r\n )\r\n\r\n if not os.path.isdir(ckpt_dir):\r\n os.makedirs(ckpt_dir)\r\n\r\n state_dict = model.state_dict()\r\n\r\n def to_tf_var_name(name:str):\r\n for patt, repl in iter(var_map):\r\n name = name.replace(patt, repl)\r\n return 'bert/{}'.format(name)\r\n\r\n def create_tf_var(tensor:np.ndarray, name:str, session:tf.Session):\r\n tf_dtype = tf.dtypes.as_dtype(tensor.dtype)\r\n tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())\r\n session.run(tf.variables_initializer([tf_var]))\r\n session.run(tf_var)\r\n return tf_var\r\n\r\n tf.reset_default_graph()\r\n with tf.Session() as session:\r\n for var_name in state_dict:\r\n tf_name = to_tf_var_name(var_name)\r\n torch_tensor = state_dict[var_name].numpy()\r\n if any([x in var_name for x in tensors_to_transpose]):\r\n torch_tensor = torch_tensor.T\r\n tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)\r\n tf.keras.backend.set_value(tf_var, torch_tensor)\r\n tf_weight = session.run(tf_var)\r\n print(\"Successfully created {}: {}\".format(tf_name, np.allclose(tf_weight, torch_tensor)))\r\n\r\n saver = tf.train.Saver(tf.trainable_variables())\r\n saver.save(session, os.path.join(ckpt_dir, model_name.replace(\"-\", \"_\") + \".ckpt\"))\r\n\r\n\r\ndef main(raw_args=None):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--model_name\",\r\n type=str,\r\n required=True,\r\n help=\"model name e.g. 
bert-base-uncased\")\r\n parser.add_argument(\"--cache_dir\",\r\n type=str,\r\n default=None,\r\n required=False,\r\n help=\"Directory containing pytorch model\")\r\n parser.add_argument(\"--pytorch_model_path\",\r\n type=str,\r\n required=True,\r\n help=\"/path/to/<pytorch-model-name>.bin\")\r\n parser.add_argument(\"--tf_cache_dir\",\r\n type=str,\r\n required=True,\r\n help=\"Directory in which to save tensorflow model\")\r\n args = parser.parse_args(raw_args)\r\n \r\n model = BertModel.from_pretrained(\r\n pretrained_model_name_or_path=args.model_name,\r\n state_dict=torch.load(args.pytorch_model_path),\r\n cache_dir=args.cache_dir\r\n )\r\n \r\n convert_pytorch_checkpoint_to_tf(\r\n model=model,\r\n ckpt_dir=args.tf_cache_dir,\r\n model_name=args.model_name\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "# coding=utf-8\r\n# Copyright 2018 The HuggingFace Inc. team.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Convert Transformer XL checkpoint and datasets.\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport argparse\r\nimport os\r\nimport sys\r\nfrom io import open\r\n\r\nimport torch\r\n\r\nimport pytorch_transformers.tokenization_transfo_xl as data_utils\r\n\r\nfrom pytorch_transformers import CONFIG_NAME, WEIGHTS_NAME\r\nfrom pytorch_transformers.modeling_transfo_xl import (TransfoXLConfig, TransfoXLLMHeadModel,\r\n load_tf_weights_in_transfo_xl)\r\nfrom pytorch_transformers.tokenization_transfo_xl import (CORPUS_NAME, VOCAB_FILES_NAMES)\r\n\r\nif sys.version_info[0] == 2:\r\n import cPickle as pickle\r\nelse:\r\n import pickle\r\n\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n# We do this to be able to load python 2 datasets pickles\r\n# See e.g. 
https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918\r\ndata_utils.Vocab = data_utils.TransfoXLTokenizer\r\ndata_utils.Corpus = data_utils.TransfoXLCorpus\r\nsys.modules['data_utils'] = data_utils\r\nsys.modules['vocabulary'] = data_utils\r\n\r\ndef convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,\r\n transfo_xl_config_file,\r\n pytorch_dump_folder_path,\r\n transfo_xl_dataset_file):\r\n if transfo_xl_dataset_file:\r\n # Convert a pre-processed corpus (see original TensorFlow repo)\r\n with open(transfo_xl_dataset_file, \"rb\") as fp:\r\n corpus = pickle.load(fp, encoding=\"latin1\")\r\n # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)\r\n pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']\r\n print(\"Save vocabulary to {}\".format(pytorch_vocab_dump_path))\r\n corpus_vocab_dict = corpus.vocab.__dict__\r\n torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)\r\n\r\n corpus_dict_no_vocab = corpus.__dict__\r\n corpus_dict_no_vocab.pop('vocab', None)\r\n pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME\r\n print(\"Save dataset to {}\".format(pytorch_dataset_dump_path))\r\n torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)\r\n\r\n if tf_checkpoint_path:\r\n # Convert a pre-trained TensorFlow model\r\n config_path = os.path.abspath(transfo_xl_config_file)\r\n tf_path = os.path.abspath(tf_checkpoint_path)\r\n\r\n print(\"Converting Transformer XL checkpoint from {} with config at {}\".format(tf_path, config_path))\r\n # Initialise PyTorch model\r\n if transfo_xl_config_file == \"\":\r\n config = TransfoXLConfig()\r\n else:\r\n config = TransfoXLConfig.from_json_file(transfo_xl_config_file)\r\n print(\"Building PyTorch model from configuration: {}\".format(str(config)))\r\n model = TransfoXLLMHeadModel(config)\r\n\r\n model = load_tf_weights_in_transfo_xl(model, config, tf_path)\r\n # Save pytorch-model\r\n pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)\r\n pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)\r\n print(\"Save PyTorch model to {}\".format(os.path.abspath(pytorch_weights_dump_path)))\r\n torch.save(model.state_dict(), pytorch_weights_dump_path)\r\n print(\"Save configuration file to {}\".format(os.path.abspath(pytorch_config_dump_path)))\r\n with open(pytorch_config_dump_path, \"w\", encoding=\"utf-8\") as f:\r\n f.write(config.to_json_string())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--pytorch_dump_folder_path\",\r\n default = None,\r\n type = str,\r\n required = True,\r\n help = \"Path to the folder to store the PyTorch model or dataset/vocab.\")\r\n parser.add_argument(\"--tf_checkpoint_path\",\r\n default = \"\",\r\n type = str,\r\n help = \"An optional path to a TensorFlow checkpoint path to be converted.\")\r\n parser.add_argument(\"--transfo_xl_config_file\",\r\n default = \"\",\r\n type = str,\r\n help = \"An optional config json file corresponding to the pre-trained BERT model. 
\\n\"\r\n \"This specifies the model architecture.\")\r\n parser.add_argument(\"--transfo_xl_dataset_file\",\r\n default = \"\",\r\n type = str,\r\n help = \"An optional dataset file to be converted in a vocabulary.\")\r\n args = parser.parse_args()\r\n convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,\r\n args.transfo_xl_config_file,\r\n args.pytorch_dump_folder_path,\r\n args.transfo_xl_dataset_file)\r\n", "# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"PyTorch RoBERTa model. \"\"\"\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\n\r\nimport logging\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn import CrossEntropyLoss, MSELoss\r\n\r\nfrom pytorch_transformers.modeling_bert import (BertConfig, BertEmbeddings,\r\n BertLayerNorm, BertModel,\r\n BertPreTrainedModel, gelu)\r\n\r\nfrom pytorch_transformers.modeling_utils import add_start_docstrings\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {\r\n 'roberta-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin\",\r\n 'roberta-large': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin\",\r\n 'roberta-large-mnli': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin\",\r\n}\r\n\r\nROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {\r\n 'roberta-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json\",\r\n 'roberta-large': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json\",\r\n 'roberta-large-mnli': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json\",\r\n}\r\n\r\n\r\nclass RobertaEmbeddings(BertEmbeddings):\r\n \"\"\"\r\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\r\n \"\"\"\r\n def __init__(self, config):\r\n super(RobertaEmbeddings, self).__init__(config)\r\n self.padding_idx = 1\r\n\r\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\r\n seq_length = input_ids.size(1)\r\n if position_ids is None:\r\n # Position numbers begin at padding_idx+1. Padding symbols are ignored.\r\n # cf. 
fairseq's `utils.make_positions`\r\n position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=input_ids.device)\r\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\r\n return super(RobertaEmbeddings, self).forward(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\r\n\r\n\r\nclass RobertaConfig(BertConfig):\r\n pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP\r\n\r\n\r\nROBERTA_START_DOCSTRING = r\"\"\" The RoBERTa model was proposed in\r\n `RoBERTa: A Robustly Optimized BERT Pretraining Approach`_\r\n by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,\r\n Veselin Stoyanov. It is based on Google's BERT model released in 2018.\r\n \r\n It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining\r\n objective and training with much larger mini-batches and learning rates.\r\n \r\n This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained \r\n models.\r\n\r\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\r\n refer to the PyTorch documentation for all matter related to general usage and behavior.\r\n\r\n .. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:\r\n https://arxiv.org/abs/1907.11692\r\n\r\n .. _`torch.nn.Module`:\r\n https://pytorch.org/docs/stable/nn.html#module\r\n\r\n Parameters:\r\n config (:class:`~pytorch_transformers.RobertaConfig`): Model configuration class with all the parameters of the \r\n model. Initializing with a config file does not load the weights associated with the model, only the configuration.\r\n Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.\r\n\"\"\"\r\n\r\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\r\n Inputs:\r\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\r\n Indices of input sequence tokens in the vocabulary.\r\n To match pre-training, RoBERTa input sequence should be formatted with <s> and </s> tokens as follows:\r\n\r\n (a) For sequence pairs:\r\n\r\n ``tokens: <s> Is this Jacksonville ? </s> </s> No it is not . </s>``\r\n\r\n (b) For single sequences:\r\n\r\n ``tokens: <s> the dog is hairy . 
</s>``\r\n\r\n Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with \r\n the ``add_special_tokens`` parameter set to ``True``.\r\n\r\n RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on\r\n the right rather than the left.\r\n\r\n See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and\r\n :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\r\n **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\r\n Indices of positions of each input sequence tokens in the position embeddings.\r\n Selected in the range ``[0, config.max_position_embeddings - 1[``.\r\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\r\n Mask to avoid performing attention on padding token indices.\r\n Mask values selected in ``[0, 1]``:\r\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\r\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\r\n Mask to nullify selected heads of the self-attention modules.\r\n Mask values selected in ``[0, 1]``:\r\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\r\n\"\"\"\r\n\r\n@add_start_docstrings(\"The bare RoBERTa Model transformer outputing raw hidden-states without any specific head on top.\",\r\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\r\nclass RobertaModel(BertModel):\r\n r\"\"\"\r\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\r\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\r\n Sequence of hidden-states at the output of the last layer of the model.\r\n **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``\r\n Last layer hidden-state of the first token of the sequence (classification token)\r\n further processed by a Linear layer and a Tanh activation function. The Linear\r\n layer weights are trained from the next sentence prediction (classification)\r\n objective during Bert pretraining. 
This output is usually *not* a good summary\r\n of the semantic content of the input, you're often better with averaging or pooling\r\n the sequence of hidden-states for the whole input sequence.\r\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\r\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\r\n of shape ``(batch_size, sequence_length, hidden_size)``:\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\r\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\r\n\r\n Examples::\r\n\r\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\r\n model = RobertaModel.from_pretrained('roberta-base')\r\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\r\n outputs = model(input_ids)\r\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\r\n\r\n \"\"\"\r\n config_class = RobertaConfig\r\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\r\n base_model_prefix = \"roberta\"\r\n\r\n def __init__(self, config):\r\n super(RobertaModel, self).__init__(config)\r\n\r\n self.embeddings = RobertaEmbeddings(config)\r\n self.init_weights()\r\n\r\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):\r\n if input_ids[:, 0].sum().item() != 0:\r\n logger.warning(\"A sequence with no special tokens has been passed to the RoBERTa model. \"\r\n \"This model requires special tokens in order to work. \"\r\n \"Please specify add_special_tokens=True in your encoding.\")\r\n return super(RobertaModel, self).forward(input_ids, token_type_ids, attention_mask, position_ids, head_mask)\r\n\r\n\r\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. 
\"\"\",\r\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\r\nclass RobertaForMaskedLM(BertPreTrainedModel):\r\n r\"\"\"\r\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\r\n Labels for computing the masked language modeling loss.\r\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\r\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\r\n in ``[0, ..., config.vocab_size]``\r\n\r\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\r\n **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\r\n Masked language modeling loss.\r\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\r\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\r\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\r\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\r\n of shape ``(batch_size, sequence_length, hidden_size)``:\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\r\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\r\n\r\n Examples::\r\n\r\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\r\n model = RobertaForMaskedLM.from_pretrained('roberta-base')\r\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\r\n outputs = model(input_ids, masked_lm_labels=input_ids)\r\n loss, prediction_scores = outputs[:2]\r\n\r\n \"\"\"\r\n config_class = RobertaConfig\r\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\r\n base_model_prefix = \"roberta\"\r\n\r\n def __init__(self, config):\r\n super(RobertaForMaskedLM, self).__init__(config)\r\n\r\n self.roberta = RobertaModel(config)\r\n self.lm_head = RobertaLMHead(config)\r\n\r\n self.init_weights()\r\n self.tie_weights()\r\n\r\n def tie_weights(self):\r\n \"\"\" Make sure we are sharing the input and output embeddings.\r\n Export to TorchScript can't handle parameter sharing so we are cloning them instead.\r\n \"\"\"\r\n self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)\r\n\r\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, position_ids=None,\r\n head_mask=None):\r\n outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,\r\n attention_mask=attention_mask, head_mask=head_mask)\r\n sequence_output = outputs[0]\r\n prediction_scores = self.lm_head(sequence_output)\r\n\r\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\r\n\r\n if masked_lm_labels is not None:\r\n loss_fct = CrossEntropyLoss(ignore_index=-1)\r\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\r\n outputs = (masked_lm_loss,) + outputs\r\n\r\n return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)\r\n\r\n\r\nclass 
RobertaLMHead(nn.Module):\r\n \"\"\"Roberta Head for masked language modeling.\"\"\"\r\n\r\n def __init__(self, config):\r\n super(RobertaLMHead, self).__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n\r\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\r\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\r\n\r\n def forward(self, features, **kwargs):\r\n x = self.dense(features)\r\n x = gelu(x)\r\n x = self.layer_norm(x)\r\n\r\n # project back to size of vocabulary with bias\r\n x = self.decoder(x) + self.bias\r\n\r\n return x\r\n\r\n\r\n@add_start_docstrings(\"\"\"RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer \r\n on top of the pooled output) e.g. for GLUE tasks. \"\"\",\r\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\r\nclass RobertaForSequenceClassification(BertPreTrainedModel):\r\n r\"\"\"\r\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\r\n Labels for computing the sequence classification/regression loss.\r\n Indices should be in ``[0, ..., config.num_labels]``.\r\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\r\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\r\n\r\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\r\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\r\n Classification (or regression if config.num_labels==1) loss.\r\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\r\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\r\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\r\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\r\n of shape ``(batch_size, sequence_length, hidden_size)``:\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\r\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\r\n\r\n Examples::\r\n\r\n tokenizer = RoertaTokenizer.from_pretrained('roberta-base')\r\n model = RobertaForSequenceClassification.from_pretrained('roberta-base')\r\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\r\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\r\n outputs = model(input_ids, labels=labels)\r\n loss, logits = outputs[:2]\r\n\r\n \"\"\"\r\n config_class = RobertaConfig\r\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\r\n base_model_prefix = \"roberta\"\r\n\r\n def __init__(self, config):\r\n super(RobertaForSequenceClassification, self).__init__(config)\r\n self.num_labels = config.num_labels\r\n\r\n self.roberta = RobertaModel(config)\r\n self.classifier = RobertaClassificationHead(config)\r\n \r\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,\r\n position_ids=None, head_mask=None):\r\n outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,\r\n 
attention_mask=attention_mask, head_mask=head_mask)\r\n sequence_output = outputs[0]\r\n logits = self.classifier(sequence_output)\r\n\r\n outputs = (logits,) + outputs[2:]\r\n if labels is not None:\r\n if self.num_labels == 1:\r\n # We are doing regression\r\n loss_fct = MSELoss()\r\n loss = loss_fct(logits.view(-1), labels.view(-1))\r\n else:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n outputs = (loss,) + outputs\r\n\r\n return outputs # (loss), logits, (hidden_states), (attentions)\r\n\r\n\r\n\r\nclass RobertaClassificationHead(nn.Module):\r\n \"\"\"Head for sentence-level classification tasks.\"\"\"\r\n\r\n def __init__(self, config):\r\n super(RobertaClassificationHead, self).__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n def forward(self, features, **kwargs):\r\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\r\n x = self.dropout(x)\r\n x = self.dense(x)\r\n x = torch.tanh(x)\r\n x = self.dropout(x)\r\n x = self.out_proj(x)\r\n return x\r\n" ]
[ [ "tensorflow.keras.backend.set_value", "numpy.allclose", "torch.load", "tensorflow.zeros_initializer", "tensorflow.variables_initializer", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.dtypes.as_dtype" ], [ "torch.save" ], [ "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.zeros", "torch.tanh", "torch.nn.Linear", "torch.arange", "torch.nn.MSELoss" ] ]
wbwatkinson/determined
[ "f9e099e06746a79a2eaf51a89acc264fc5c301e1" ]
[ "harness/determined/keras/_tf_keras_trial.py" ]
[ "import inspect\nimport logging\nimport pathlib\nimport pickle\nimport random\nimport sys\nfrom abc import abstractmethod\nfrom typing import Any, Dict, List, Optional, cast\n\nimport h5py\nimport numpy as np\nimport tensorflow as tf\nfrom packaging import version\nfrom tensorflow.keras.models import Model\nfrom tensorflow.python.framework.ops import EagerTensor\nfrom tensorflow.python.keras.callbacks import CallbackList, make_logs, set_callback_parameters\nfrom tensorflow.python.keras.saving.hdf5_format import (\n load_optimizer_weights_from_hdf5_group,\n save_optimizer_weights_to_hdf5_group,\n)\nfrom tensorflow.python.keras.utils.mode_keys import ModeKeys\n\nimport determined as det\nfrom determined import horovod, keras, util, workload\nfrom determined._tf_rng import get_rng_state, set_rng_state\nfrom determined.common import check\nfrom determined.horovod import hvd\n\nIMPOSSIBLY_LARGE_EPOCHS = sys.maxsize\n\n\ndef is_tf2_enabled() -> bool:\n \"\"\"Checks if `tf.compat.v1.disable_v2_behavior` has been called.\"\"\"\n if version.parse(tf.__version__) < version.parse(\"2.0.0\"):\n return False\n\n try:\n # Try recent tf2 variant first.\n return tf._tf2.enabled() # type: ignore\n except AttributeError:\n # Fallback to legacy option for tensorflow circa 2.2.0.\n return tf.python.tf2.enabled() # type: ignore\n\n\ndef load_optimizer_weights(\n model: Model, h5group: Any, optimizer: tf.keras.optimizers.Optimizer\n) -> None:\n \"\"\"\n Load the optimizer states from a tf.keras model saved with\n tf.keras.models.save_model(). Ignores and prints a warning message when\n encountering a graph network. This implementation is lifted from\n tf.keras.models.load_model().\n \"\"\"\n tf2_2_or_newer = version.parse(tf.__version__) >= version.parse(\"2.2.0\")\n if model._is_graph_network or tf2_2_or_newer: # pylint: disable=protected-access\n if tf2_2_or_newer:\n try:\n optimizer._create_all_weights(model.trainable_variables)\n except (NotImplementedError, AttributeError):\n logging.warning(\n \"Error when creating the weights of optimizer, making it \"\n \"impossible to restore the saved optimizer state. As a result, \"\n \"your model is starting with a freshly initialized optimizer.\"\n )\n else:\n # Build train function (to get weight updates). Models that aren't\n # graph networks must wait until they are called with data to\n # _make_train_function() and so can't load optimizer weights.\n model._make_train_function()\n\n optimizer_weight_values = load_optimizer_weights_from_hdf5_group(h5group)\n try:\n optimizer.set_weights(optimizer_weight_values)\n except ValueError:\n logging.warning(\n \"Error in loading the saved optimizer \"\n \"state. As a result, your model is \"\n \"starting with a freshly initialized \"\n \"optimizer.\"\n )\n else:\n logging.warning(\n \"Sequential models without an `input_shape` \"\n \"passed to the first layer cannot reload their \"\n \"optimizer state. 
As a result, your model is \"\n \"starting with a freshly initialized optimizer.\"\n )\n\n\nclass TrialControllerMultiplexer(keras.callbacks._MultiplexerBase):\n \"\"\"\n Extend _MultiplexerBase with the logic for triggering on_train_workload_end, and on_test_end\n and based on master-requested workloads.\n \"\"\"\n\n def __init__(self, trial_controller: \"TFKerasTrialController\", *arg: Any, **kwarg: Any) -> None:\n super().__init__(*arg, **kwarg)\n self.trial_controller = trial_controller\n self.test_inputs = 0\n\n def on_train_begin(self, logs: Optional[Dict] = None) -> None:\n super().on_train_begin()\n self.trial_controller._control_loop()\n\n def on_train_batch_end(self, batch: int, logs: Optional[Dict] = None) -> None:\n super().on_train_batch_end(batch, logs)\n assert isinstance(logs, dict)\n\n # Keras helpfully records the observed batch size as logs[\"size\"]. Keras internal code\n # handles the case where logs is not present (see BaseLogger callback). I (rb) can't\n # figure out where that would originate from, so we will include reasonable fallback\n # behavior for that case.\n num_inputs = logs.get(\"size\", self.batch_size)\n\n self.trial_controller._post_train_batch_end(num_inputs, logs)\n\n def on_test_begin(self, logs: Optional[Dict] = None) -> None:\n super().on_test_begin(logs)\n self.test_inputs = 0\n\n def on_test_batch_end(self, batch: int, logs: Optional[Dict] = None) -> None:\n super().on_test_batch_end(batch, logs)\n assert isinstance(logs, dict)\n self.test_inputs += logs.get(\"size\", self.batch_size)\n\n def _corrected_test_end(self, logs: Dict) -> None:\n super()._corrected_test_end(logs)\n self.trial_controller._stop_training_check()\n\n def get_test_inputs(self) -> int:\n return self.test_inputs\n\n def _corrected_epoch_end(self, epoch: int, logs: Dict) -> None:\n super()._corrected_epoch_end(epoch, logs)\n self.trial_controller._stop_training_check()\n\n def on_train_end(self, logs: Optional[Dict] = None) -> None:\n # Ignore on_train_end when we manage the training loop, since in TF 2.0 (but not 2.1!) will\n # trigger an exta on_train_end when we raise the WorkerFinishedGracefully exception.\n pass\n\n def _corrected_train_end(self, logs: Optional[Dict] = None) -> None:\n super().on_train_end(logs)\n\n\nclass TFKerasTrialController(det.LoopTrialController):\n @staticmethod\n def supports_averaging_training_metrics() -> bool:\n return True\n\n @staticmethod\n def pre_execute_hook(env: det.EnvContext, hvd_config: horovod.HorovodContext) -> None:\n # Initialize the correct horovod.\n if hvd_config.use:\n hvd.require_horovod_type(\"tensorflow.keras\", \"TFKerasTrial is in use.\")\n hvd.init()\n\n # Start with a clean graph.\n tf.compat.v1.reset_default_graph()\n\n TFKerasTrialController._set_random_seeds(env.trial_seed)\n\n # For the Native API we must configure the Session before running user code.\n if env.experiment_config.native_enabled():\n session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)\n TFKerasTrialController._configure_session(env, hvd_config, session_config)\n\n @staticmethod\n def _set_random_seeds(seed: int) -> None:\n # Set identical random seeds on all training processes. 
When using horovod, each worker will\n # start at a unique offset in the dataset, ensuring it's processing a unique training batch.\n random.seed(seed)\n np.random.seed(seed)\n tf.compat.v1.set_random_seed(seed)\n\n @staticmethod\n def _configure_session(\n env: det.EnvContext,\n hvd_config: horovod.HorovodContext,\n session_config: tf.compat.v1.ConfigProto,\n ) -> Optional[tf.compat.v1.Session]:\n if not tf.executing_eagerly():\n session_config.gpu_options.allow_growth = True\n if hvd_config.use:\n # We launch a horovod process per GPU. Each process\n # needs to bind to a unique GPU.\n session_config.gpu_options.visible_device_list = str(hvd.local_rank())\n\n session = tf.compat.v1.Session(\n graph=tf.compat.v1.get_default_graph(), config=session_config\n )\n\n tf.compat.v1.keras.backend.set_session(session)\n\n return session\n else:\n gpus = tf.config.experimental.list_physical_devices(\"GPU\")\n\n if len(gpus) > 0:\n local_rank = hvd.local_rank() if hvd_config.use else 0\n gpu = gpus[local_rank]\n tf.config.experimental.set_visible_devices(gpu, \"GPU\")\n tf.config.experimental.set_memory_growth(gpu, True)\n\n return None\n\n @staticmethod\n def compile_model(\n context: keras.TFKerasContext,\n compile_args: inspect.BoundArguments,\n env: det.EnvContext,\n hvd_config: horovod.HorovodContext,\n ) -> None:\n context.model = keras._get_multi_gpu_model_if_using_native_parallel(\n pre_compiled_model=context.model,\n env=env,\n hvd_config=hvd_config,\n )\n\n if \"optimizer\" in compile_args.arguments:\n # For backwards compatibility we check if an optimizer is passed as part\n # of the compile call. If `wrap_optimizer()` is used, we will ignore this\n # this optimizer.\n compile_args.arguments[\"optimizer\"] = context._process_optimizer_from_compile(\n compile_args.arguments[\"optimizer\"]\n )\n\n if hvd_config.use and version.parse(\"2.0.0\") <= version.parse(\n tf.__version__\n ) < version.parse(\"2.2.0\"):\n logging.info(\n \"Calling `model.compile(...)` with `experimental_run_tf_function=False` to ensure \"\n \"TensorFlow calls `optimizer.get_gradients()` to compute gradients.\"\n )\n\n context.model.compile(\n *compile_args.args, **compile_args.kwargs, experimental_run_tf_function=False\n )\n else:\n context.model.compile(*compile_args.args, **compile_args.kwargs)\n\n @staticmethod\n def from_trial(\n trial_inst: det.Trial,\n context: det.TrialContext,\n env: det.EnvContext,\n workloads: workload.Stream,\n load_path: Optional[pathlib.Path],\n rendezvous_info: det.RendezvousInfo,\n hvd_config: horovod.HorovodContext,\n ) -> det.TrialController:\n check.is_instance(\n context, keras.TFKerasTrialContext, \"TFKerasTrialController needs a TFKerasTrialContext\"\n )\n context = cast(keras.TFKerasTrialContext, context)\n\n check.is_instance(trial_inst, TFKerasTrial, \"TFKerasTrialController needs a TFKerasTrial\")\n trial = cast(TFKerasTrial, trial_inst)\n\n session = TFKerasTrialController._configure_session(env, hvd_config, trial.session_config())\n\n training_data = keras._adapt_data_from_data_loader(\n input_data=trial.build_training_data_loader(),\n batch_size=context.get_per_slot_batch_size(),\n )\n\n validation_data = keras._adapt_data_from_data_loader(\n input_data=trial.build_validation_data_loader(),\n batch_size=context.get_per_slot_batch_size(),\n )\n\n trial.build_model()\n check.is_not_none(context.model, \"Please call wrap_model(...).\")\n\n check.is_not_none(context.compile_args, \"Please call model.compile(...).\")\n compile_args = cast(inspect.BoundArguments, 
context.compile_args)\n\n TFKerasTrialController.compile_model(\n context=context, compile_args=compile_args, env=env, hvd_config=hvd_config\n )\n\n tf_keras_callbacks = trial.keras_callbacks()\n\n return TFKerasTrialController(\n context.model,\n session,\n keras.TFKerasTrainConfig(training_data, validation_data, tf_keras_callbacks),\n context,\n env,\n workloads,\n load_path,\n rendezvous_info,\n hvd_config,\n )\n\n @staticmethod\n def from_native(\n context: det.NativeContext,\n env: det.EnvContext,\n workloads: workload.Stream,\n load_path: Optional[pathlib.Path],\n rendezvous_info: det.RendezvousInfo,\n hvd_config: horovod.HorovodContext,\n ) -> det.TrialController:\n check.is_instance(\n context,\n keras.TFKerasNativeContext,\n \"TFKerasTrialController needs a TFKerasSprinkleContext\",\n )\n context = cast(keras.TFKerasNativeContext, context)\n\n check.is_not_none(context.model, \"Please call wrap_model(...).\")\n\n check.is_not_none(context.compile_args, \"Please call model.compile(...).\")\n check.is_not_none(\n context.train_config, \"Please call model.fit(...) or model.fit_generator(...).\"\n )\n\n # For the Native API, we would break the user's model if we changed the session\n # right now, so we have to trust the user did not modify what we set previously.\n #\n # TODO(ryan): Fix this, probably with a function for configuring the backend session.\n session = tf.compat.v1.keras.backend.get_session()\n\n compile_args = cast(inspect.BoundArguments, context.compile_args)\n train_config = cast(keras.TFKerasTrainConfig, context.train_config)\n\n TFKerasTrialController.compile_model(\n context=context, compile_args=compile_args, env=env, hvd_config=hvd_config\n )\n\n return TFKerasTrialController(\n context.model,\n session,\n train_config,\n context,\n env,\n workloads,\n load_path,\n rendezvous_info,\n hvd_config,\n )\n\n def __init__(\n self,\n model: tf.keras.models.Model,\n session: tf.compat.v1.ConfigProto,\n train_config: keras.TFKerasTrainConfig,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n super().__init__(*args, **kwargs)\n\n self.model = model\n self.session = session\n\n # Configure optimizers, done for backwards compatibility.\n self.context._select_optimizers()\n\n keras._check_if_aggregation_frequency_will_work(\n model=self.model, hvd_config=self.hvd_config\n )\n\n self.training_data = train_config.training_data\n self.validation_data = train_config.validation_data\n\n # Support the deprecated SequenceAdapter API.\n if isinstance(self.training_data, keras.SequenceAdapter):\n self.context._configure_fit(\n workers=self.training_data.workers,\n use_multiprocessing=self.training_data.use_multiprocessing,\n max_queue_size=self.training_data.max_queue_size,\n )\n # Use the provided Sequence directly.\n self.training_data = self.training_data.sequence\n if isinstance(self.validation_data, keras.SequenceAdapter):\n # Ignore these settings and use the same settings as for the fit call.\n self.validation_data = self.validation_data.sequence\n\n self._check_training_data()\n self._check_validation_data()\n\n self.enqueuers = [] # type: List[keras._Enqueuer]\n\n # If a load path is provided, load weights and restore the data location.\n self._load()\n\n self._configure_callbacks(train_config.callbacks)\n\n self.train_response_func = None # type: Optional[workload.ResponseFunc]\n self.train_workload_metrics = [] # type: List[Dict[str, Any]]\n self.train_workload_batches = 0\n self.train_workload_inputs = 0\n self.train_workload_len = 0\n self.test_inputs = 0\n\n def 
_check_training_data(self) -> None:\n cacheable_used = self.context.experimental.get_train_cacheable().is_decorator_used()\n wrap_used = self.context.dataset_initialized\n\n # Non-tf.data.Datasets should not have used the data layer.\n if not isinstance(self.training_data, tf.data.Dataset):\n if cacheable_used:\n raise det.errors.InvalidExperimentException(\n \"Pass in a tf.data.Dataset object for training data if using \"\n \"context.experimental.cache_train_dataset().\",\n )\n return\n\n # You can't use data layer and the wrap_dataset.\n if cacheable_used and wrap_used:\n raise det.errors.InvalidExperimentException(\n \"Please do not use: context.wrap_dataset(dataset) if using \"\n \"context.experimental.cache_train_dataset() and \"\n \"context.experimental.cache_validation_dataset().\",\n )\n\n # You must use either data layer or wrap_dataset.\n if not cacheable_used and not wrap_used:\n raise det.errors.InvalidExperimentException(\n \"Please use either context.wrap_dataset(dataset) or \"\n \"context.experimental.cache_train_dataset() for tf.data.dataset inputs\"\n )\n\n def _check_validation_data(self) -> None:\n cacheable_used = self.context.experimental.get_validation_cacheable().is_decorator_used()\n wrap_used = self.context.dataset_initialized\n\n # Non-tf.data.Datasets should not have used the data layer.\n if not isinstance(self.validation_data, tf.data.Dataset):\n if cacheable_used:\n raise det.errors.InvalidExperimentException(\n \"Pass in a tf.data.Dataset object for validation data if using \"\n \"context.experimental.cache_validation_dataset().\",\n )\n return\n\n # You can't use data layer and the wrap_dataset.\n if cacheable_used and wrap_used:\n raise det.errors.InvalidExperimentException(\n \"Please do not use: context.wrap_dataset(dataset) if using \"\n \"context.experimental.cache_train_dataset() and \"\n \"context.experimental.cache_validation_dataset().\",\n )\n\n # You must use either data layer or wrap_dataset.\n if not cacheable_used and not wrap_used:\n raise det.errors.InvalidExperimentException(\n \"Please use either context.wrap_dataset(dataset) or \"\n \"context.experimental.cache_validation_dataset() for tf.data.dataset inputs\"\n )\n\n def _configure_callbacks(self, user_callbacks: Optional[List]) -> None:\n \"\"\"\n If we pass a callbacks parameter to model.fit() or model.evaluate() which is a\n pre-constructed CallbackList, Keras will not alter it. 
We can use this property to\n configure the exact callback order that we want in our system.\n\n The implementation is based closely on from the real\n tf.keras.callbacks.configure_callbacks(), with the following differences:\n\n - We always assume we have the original Callbacks list.\n - We prepend and append additional Determined and Horovod callbacks\n - We create a det.keras.CallbackList instead of the normal tf.keras one.\n \"\"\"\n\n callbacks = user_callbacks or []\n check.is_instance(\n callbacks,\n list,\n \"the callbacks parameter of model.fit() or model.eval() must be a list of Callbacks\",\n )\n\n if self.env.experiment_config.get_records_per_epoch() is None:\n for cb in callbacks:\n if util.is_overridden(cb.on_epoch_end, tf.keras.callbacks.Callback) and not getattr(\n cb, \"_skip_epoch_end_check\", False\n ):\n if isinstance(cb, keras.callbacks.Callback):\n # New callbacks must obey the rules.\n raise AssertionError(\n \"it is unsupported to use a Callback that defines on_epoch_end \"\n f\"({type(cb).__name__}) without setting the records_per_epoch value \"\n \"in the experiment config\"\n )\n else:\n # Pre-existing callbacks only get a warning.\n logging.warning(\n \"It is unsupported to use a Callback that defines on_epoch_end \"\n f\"({type(cb).__name__})without setting the records_per_epoch value in \"\n \"the experiment config. Training will continue but on_epoch_end will \"\n \"never be called.\"\n )\n\n # Standard post-callback from the real configure_callbacks().\n # Note that we are not including BaseLogger since it is only for averaging metrics over an\n # entire epoch, and we don't report any metrics in on_epoch_end at all.\n self.model.history = keras.callbacks._DeterminedHistory()\n callbacks = callbacks + [self.model.history]\n\n if self.context._fit_verbose:\n # Our implementation of verbose=True.\n callbacks = [keras.callbacks._DeterminedProgress()] + callbacks\n\n # Calculate batches per epoch. We can only handle batches per epoch, not records per epoch,\n # because we would have to communicate after every batch to know how many records were in\n # each batch on each worker in order to trigger on_epoch_end callbacks correctly.\n batches_per_epoch = None\n records_per_epoch = self.env.experiment_config.get_records_per_epoch()\n if records_per_epoch is not None:\n batches_per_epoch = records_per_epoch // self.context.get_global_batch_size()\n\n # We wrap all of the callbacks in a single Multiplexer.\n self.multiplexer = TrialControllerMultiplexer(\n self,\n callbacks,\n self.is_chief,\n self.batch_size,\n batches_per_epoch,\n self.multiplexer_load_state,\n )\n callbacks = [self.multiplexer]\n\n if self.hvd_config.use:\n # Horovod synchronization of initial variables should happen even before we enter our\n # control loop, in case we have an initial validation requested.\n callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)] + callbacks\n\n # The remainder of Determined control logic is done with a custom CallbackList\n self.callback_list = CallbackList(callbacks)\n\n # Disable timing of callbacks in some versions of keras. 
This can fail in some corner-cases\n # because CallbackList is not designed to allow some callbacks to call other callbacks, and\n # they can interact very poorly.\n if hasattr(self.callback_list, \"_timing\"):\n self.callback_list._timing[\"on_train_batch_begin\"] = True\n self.callback_list._timing[\"on_train_batch_end\"] = True\n self.callback_list._timing[\"on_test_batch_begin\"] = True\n self.callback_list._timing[\"on_test_batch_end\"] = True\n self.callback_list._timing[\"on_predict_batch_begin\"] = True\n self.callback_list._timing[\"on_predict_batch_end\"] = True\n\n # callback_model is the model given to callbacks, where we should be checking for\n # stop_training. In horovod dtrain or non-dtrain, it should always be self.model.\n callback_model = self.model._get_callback_model()\n self.callback_list.set_model(callback_model)\n\n # Fill in bogus values for most of these... some of them are very complex to calculate.\n set_callback_parameters(\n self.callback_list,\n self.model,\n do_validation=False,\n batch_size=self.batch_size,\n epochs=None,\n steps_per_epoch=None,\n samples=None,\n verbose=False,\n mode=ModeKeys.TRAIN,\n )\n\n self.callback_list.model.stop_training = False\n\n def _save_checkpoint(self, path: pathlib.Path) -> workload.Response:\n if not self.is_chief:\n return workload.Skipped()\n\n path.mkdir(parents=True, exist_ok=True)\n\n # Save model weights. We use `tf` format because `h5` does not support\n # models that subclass `tf.keras.Model` and define custom `call()`\n # and/or `train_step()` functions.\n self.model.save_weights(\n str(path.joinpath(\"determined-keras-model-weights\")), save_format=\"tf\"\n )\n\n # Save optimizer(s) weights.\n with h5py.File(path.joinpath(\"determined-keras-optimizer-weights.h5\"), \"w\") as h5file:\n for idx, optimizer in enumerate(self.context._optimizers):\n opt_group = h5file.create_group(f\"optimizer-{idx}\")\n save_optimizer_weights_to_hdf5_group(opt_group, optimizer)\n\n # Save RNG state.\n rng_state = get_rng_state()\n\n with open(path.joinpath(\"rng_state.pkl\"), \"wb\") as f:\n pickle.dump(rng_state, f)\n\n # Save user code.\n det.util.write_user_code(path, self.env.on_cluster)\n\n # Save callback(s) state.\n callbacks_state = self.multiplexer._get_state()\n with path.joinpath(\"determined-callbacks.v1.pkl\").open(\"wb\") as f:\n pickle.dump(callbacks_state, f)\n\n self.multiplexer._checkpoint_end(path)\n\n return {\"framework\": f\"tensorflow-{tf.__version__}\", \"format\": \"saved_weights\"}\n\n def _load_model_weights(self, model_weights_checkpoint_path: pathlib.Path) -> None:\n logging.info(f\"Restoring model weights from {model_weights_checkpoint_path}.\")\n self.model.load_weights(str(model_weights_checkpoint_path))\n\n def _load_optimizers_weights(self, optimizer_weights_checkpoint_path: pathlib.Path) -> None:\n logging.info(f\"Restoring optimizer weights from {optimizer_weights_checkpoint_path}.\")\n with h5py.File(optimizer_weights_checkpoint_path, \"r\") as h5file:\n if \"optimizer_weights\" in h5file:\n load_optimizer_weights(\n self.model, h5file[\"optimizer_weights\"], self.model.optimizer\n )\n return\n\n for idx, optimizer in enumerate(self.context._optimizers):\n if f\"optimizer-{idx}\" in h5file:\n load_optimizer_weights(self.model, h5file[f\"optimizer-{idx}\"], optimizer)\n\n def _load_model_and_optimizer_weights_v1(self) -> None:\n self.load_path = cast(pathlib.Path, self.load_path)\n self._load_model_weights(self.load_path.joinpath(\"determined-keras-model\"))\n 
self._load_optimizers_weights(self.load_path.joinpath(\"determined-keras-model\"))\n\n def _load_model_and_optimizer_weights_v2(self) -> None:\n self.load_path = cast(pathlib.Path, self.load_path)\n self._load_model_weights(self.load_path.joinpath(\"determined-keras-model.h5\"))\n self._load_optimizers_weights(self.load_path.joinpath(\"determined-keras-model.h5\"))\n\n def _load_model_and_optimizer_weights_v3(self) -> None:\n self.load_path = cast(pathlib.Path, self.load_path)\n self._load_model_weights(self.load_path.joinpath(\"determined-keras-model-weights\"))\n self._load_optimizers_weights(\n self.load_path.joinpath(\"determined-keras-optimizer-weights.h5\")\n )\n\n def _load(self) -> None:\n self.multiplexer_load_state = None # type: Optional[Dict]\n if not self.load_path:\n return\n\n # Find model code path, we check multiple naming conventions for backwards compatibility.\n if self.load_path.joinpath(\"determined-keras-model.h5\").exists():\n self._load_model_and_optimizer_weights_v2()\n elif self.load_path.joinpath(\"determined-keras-optimizer-weights.h5\").exists():\n self._load_model_and_optimizer_weights_v3()\n else:\n self._load_model_and_optimizer_weights_v1()\n\n # Load RNG state.\n try:\n with open(self.load_path.joinpath(\"rng_state.pkl\"), \"rb\") as f:\n rng_state = pickle.load(f)\n\n set_rng_state(rng_state)\n except IOError:\n logging.warning(\"Checkpoint did not include RNG state.\")\n\n # Load callbacks.\n cb_state_path = self.load_path.joinpath(\"determined-callbacks.v1.pkl\")\n if cb_state_path.exists():\n with cb_state_path.open(\"rb\") as f:\n self.multiplexer_load_state = pickle.load(f)\n\n def run(self) -> None:\n try:\n self._launch_fit()\n except det.errors.WorkerFinishedGracefully:\n pass\n finally:\n self._stop_enqueuers()\n\n def _launch_fit(self) -> None:\n training_data = self.training_data\n\n if isinstance(training_data, tf.keras.utils.Sequence):\n # Handle args from fit(): shuffle, workers, use_multiprocessing, and max_queue_size.\n enqueuer = keras._build_enqueuer(\n sequence=training_data,\n workers=self.context._fit_workers,\n use_multiprocessing=self.context._fit_use_multiprocessing,\n max_queue_size=self.context._fit_max_queue_size,\n shard_rank=self.context.distributed.get_rank(),\n num_shards=self.context.distributed.get_size(),\n repeat=True,\n shuffle=self.context._fit_shuffle,\n shuffle_seed=self.context.get_trial_seed(),\n prior_batches_trained=self.env.initial_workload.total_batches_processed,\n )\n enqueuer.start()\n self.enqueuers.append(enqueuer)\n training_data = enqueuer.data()\n\n if isinstance(training_data, tf.data.Dataset):\n training_data = training_data.repeat()\n if self.context._fit_shuffle:\n logging.warning(\n \"You set shuffle=True for a tf.data.Dataset, which will be ignored. 
\"\n \"Please call .shuffle() on your dataset instead.\"\n )\n\n self.model.fit(\n training_data,\n class_weight=self.context._fit_class_weight,\n callbacks=self.callback_list,\n shuffle=False,\n steps_per_epoch=sys.maxsize,\n epochs=IMPOSSIBLY_LARGE_EPOCHS,\n validation_split=0,\n verbose=0,\n workers=0,\n )\n\n def _launch_evaluate(self) -> Any:\n validation_data = self.validation_data\n steps = None\n\n if isinstance(validation_data, tf.keras.utils.Sequence):\n # Calculate the length of our validation shard.\n steps = len(validation_data)\n if self.context.distributed.get_size() > 1:\n size = self.context.distributed.get_size()\n rank = self.context.distributed.get_rank()\n steps = steps // size + (1 if steps % size > rank else 0)\n\n # Handle args from fit(): shuffle, workers, use_multiprocessing, and max_queue_size.\n enqueuer = keras._build_enqueuer(\n sequence=validation_data,\n workers=self.context._fit_workers,\n use_multiprocessing=self.context._fit_use_multiprocessing,\n max_queue_size=self.context._fit_max_queue_size,\n shard_rank=self.context.distributed.get_rank(),\n num_shards=self.context.distributed.get_size(),\n repeat=False,\n shuffle=False,\n shuffle_seed=0,\n prior_batches_trained=0,\n )\n enqueuer.start()\n self.enqueuers.append(enqueuer)\n validation_data = enqueuer.data()\n\n if isinstance(validation_data, tf.data.Dataset):\n # Handle validation_steps, which in Keras only applies to tf.data.Datasets.\n steps = self.context._fit_validation_steps\n\n # Starting in TF 2.2 users may define custom test_step() that do\n # not use the model metrics.\n use_model_metrics = not (\n version.parse(tf.__version__) >= version.parse(\"2.2.0\")\n and is_tf2_enabled()\n and tf.executing_eagerly()\n )\n evaluate_kwargs = {} if use_model_metrics else {\"return_dict\": True}\n\n if self.env.test_mode:\n steps = 1\n\n metrics_values = self.model.evaluate(\n validation_data,\n callbacks=self.callback_list,\n steps=steps,\n verbose=0,\n workers=0,\n **evaluate_kwargs,\n )\n logging.debug(f\"Worker finished model.evaluate() with metrics: {metrics_values}.\")\n\n # Clean up the enqueuer if we started one.\n if isinstance(self.validation_data, tf.keras.utils.Sequence):\n enqueuer.stop()\n self.enqueuers.remove(enqueuer)\n\n # A special side-effect of converting the keras sequence to a generator and passing\n # steps explicitly is that keras will exit our generator after N steps and the\n # Sequence.on_epoch_end() that normally runs after the last yield won't run at all\n # because the fit loop will call next() exactly `steps` times. 
So we try to match the\n # exact keras behavior by manually calling on_epoch_end() here.\n self.validation_data.on_epoch_end()\n\n # If the model was compiled with metrics=None, metrics_value will be a single value.\n if not isinstance(metrics_values, (tuple, list, dict)):\n metrics_values = (metrics_values,)\n\n if use_model_metrics:\n metrics = make_logs(self.model, {}, metrics_values, ModeKeys.TEST, prefix=\"val_\")\n else:\n check.is_instance(metrics_values, dict)\n metrics = {f\"val_{k}\": v for k, v in metrics_values.items()}\n\n return metrics\n\n def _control_loop(self) -> None:\n for wkld, args, response_func in self.workloads:\n logging.debug(f\"Received wkld {wkld.kind} with args {args}.\")\n if wkld.kind == workload.Workload.Kind.RUN_STEP:\n # Configure the state for a training step.\n self.train_response_func = response_func\n self.train_workload_batches = 0\n self.train_workload_metrics = []\n self.train_workload_len = wkld.num_batches\n self.multiplexer.set_batches_requested(wkld.num_batches)\n break\n elif wkld.kind == workload.Workload.Kind.COMPUTE_VALIDATION_METRICS:\n try:\n response_func(\n det.util.wrap_metrics(\n self._compute_validation_metrics(),\n self.context.get_stop_requested(),\n invalid_hp=False,\n )\n )\n except det.InvalidHP as e:\n logging.info(\n \"Invalid hyperparameter exception in trial validation step: {}\".format(e)\n )\n response_func(\n util.wrap_metrics(\n {},\n self.context.get_stop_requested(),\n invalid_hp=True,\n )\n )\n elif wkld.kind == workload.Workload.Kind.CHECKPOINT_MODEL:\n check.len_eq(args, 1)\n check.is_instance(args[0], pathlib.Path)\n path = cast(pathlib.Path, args[0])\n response_func(self._save_checkpoint(path))\n elif wkld.kind == workload.Workload.Kind.TERMINATE:\n response_func({} if self.is_chief else workload.Skipped())\n self.multiplexer._corrected_train_end()\n raise det.errors.WorkerFinishedGracefully\n else:\n raise AssertionError(f\"Unknown workload kind {wkld.kind}.\")\n\n def _allreduce_logs(self, logs: Dict) -> Dict:\n if not self.hvd_config.use:\n return logs\n # Reduce logs in key-sorted to be deterministic across workers.\n keys = sorted(logs)\n logging.debug(f\"all-reducing logs on worker {hvd.rank()} for {len(keys)} keys {keys}.\")\n return {\n key: np.array(self._hvd_allreduce(logs[key], average=True, name=key)) for key in keys\n }\n\n def _hvd_allreduce(self, value: Any, average: bool, name: str) -> Any:\n # The signature of our horovod allreduce changed after we rebased onto 0.21.\n hvd_sig = inspect.signature(hvd.allreduce)\n horovod_kwargs = {\n \"value\": value,\n \"name\": name,\n } # type: Dict[str, Any]\n\n if \"op\" in hvd_sig.parameters:\n horovod_kwargs[\"op\"] = hvd.Average if average else hvd.Sum\n\n # average has not yet been removed but it's deprecated. 
It defaults\n # to true and horovod does not support specifying an op while having\n # average be not None.\n if \"average\" in hvd_sig.parameters:\n horovod_kwargs[\"average\"] = None\n else:\n horovod_kwargs[\"average\"] = average\n\n return hvd.allreduce(**horovod_kwargs)\n\n def _convert_possible_tensor(self, possible_tensor: Any) -> Any:\n if isinstance(possible_tensor, EagerTensor):\n # Horovod and / or TensorFlow may promote scalars to tensors in eager mode.\n return possible_tensor.numpy()\n return possible_tensor\n\n def _post_train_batch_end(self, num_inputs: int, logs: Dict) -> None:\n # Remove default keras metrics we aren't interested in like \"batch\" and \"size\".\n self.train_workload_metrics.append(\n {\n k: self._convert_possible_tensor(v)\n for k, v in logs.items()\n if k not in {\"batch\", \"size\"}\n }\n )\n self.train_workload_inputs += num_inputs\n self.train_workload_batches += 1\n if self.train_workload_batches != self.train_workload_len:\n return\n\n if self.train_response_func is None:\n raise AssertionError(\n \"Callback should avoid calling model.predict(), \"\n \"as this will affect Determined training behavior\",\n )\n\n if self.hvd_config.use:\n num_inputs = self._hvd_allreduce(num_inputs, average=False, name=\"train_num_inputs\")\n num_inputs = self._convert_possible_tensor(num_inputs)\n\n # Return only the latest metrics, which is the running average for all trained batches in\n # the step (Keras does not report individual logs, only running averages at any point).\n final_metrics = self.train_workload_metrics[-1]\n if self.env.experiment_config.averaging_training_metrics_enabled():\n final_metrics = self._allreduce_logs(final_metrics)\n\n self.multiplexer._train_workload_end(final_metrics)\n self._stop_training_check()\n\n if self.is_chief:\n # Don't use det.util.make_metrics, because our batch metrics are not raw metrics.\n\n response = {\n \"metrics\": {\n \"num_inputs\": num_inputs,\n \"batch_metrics\": self.train_workload_metrics,\n \"avg_metrics\": final_metrics,\n },\n \"stop_requested\": self.context.get_stop_requested(),\n \"invalid_hp\": False,\n }\n self.train_response_func(response)\n else:\n self.train_response_func(workload.Skipped())\n\n self.train_response_func = None\n\n self._control_loop()\n\n # Always reset metrics before starting a new training step.\n self.model.reset_metrics()\n\n def _compute_validation_metrics(self) -> workload.Response:\n metrics = self._launch_evaluate()\n num_inputs = self.multiplexer.get_test_inputs()\n\n if self.hvd_config.use:\n # Use a global ZMQ barrier here because we have observed cases where hvd.allreduce\n # may hang when called minutes apart by different workers which may happen if\n # workers complete evaluation at different speeds.\n _ = self.context.distributed._zmq_gather(None)\n\n num_inputs = hvd.allreduce(num_inputs, average=False, name=\"validation_num_inputs\")\n if isinstance(num_inputs, EagerTensor):\n # Horovod will promote an int to a tensor in eager mode.\n num_inputs = num_inputs.numpy()\n\n metrics = self._allreduce_logs(metrics)\n check.gt(len(metrics), 0)\n\n self.multiplexer._test_end(metrics)\n\n if not self.is_chief:\n return workload.Skipped()\n\n return {\"num_inputs\": num_inputs, \"validation_metrics\": metrics}\n\n def _stop_training_check(self) -> None:\n # Detect when users set stop_training and convert it to a set_stop_requested.\n if self.multiplexer.model.stop_training:\n if self.is_chief:\n self.multiplexer.model.stop_training = False\n 
self.context.set_stop_requested(True)\n else:\n logging.debug(\"cancelling model.stop_training on non-chief worker\")\n self.multiplexer.model.stop_training = True\n\n def _stop_enqueuers(self) -> None:\n for enqueuer in self.enqueuers:\n enqueuer.stop()\n\n\nclass TFKerasTrial(det.Trial):\n \"\"\"\n To implement a new ``tf.keras`` trial, subclass this class and\n implement the abstract methods described below (:meth:`build_model`,\n :meth:`build_training_data_loader`, and :meth:`build_validation_data_loader`).\n In most cases you should provide a custom :meth:`__init__` method as well.\n\n By default, experiments use TensorFlow 1.x. To configure your trial to use\n TensorFlow 2.x, specify a TensorFlow 2.x image in the\n :ref:`environment.image <exp-environment-image>` field of the experiment\n configuration (e.g.,\n ``determinedai/environments:cuda-11.0-pytorch-1.7-lightning-1.2-tf-2.4-gpu-0.13.0``).\n\n Trials default to using eager execution with TensorFlow 2.x but not with\n TensorFlow 1.x. To override the default behavior, call the appropriate\n function at the top of your code. For example, if you want to disable\n eager execution while using TensorFlow 2.x, call\n ``tf.compat.v1.disable_eager_execution`` after your import statements.\n If you are using TensorFlow 1.x in eager mode, please add\n ``experimental_run_tf_function=False`` to your model compile function.\n\n For more information on writing ``tf.keras`` trial classes, refer to the\n :ref:`tutorial <tf-mnist-tutorial>`.\n \"\"\"\n\n trial_controller_class = TFKerasTrialController\n trial_context_class = keras.TFKerasTrialContext\n\n def __init__(self, context: keras.TFKerasTrialContext) -> None:\n \"\"\"\n Initializes a trial using the provided ``context``.\n\n This method should typically be overridden by trial definitions: at minimum,\n it is important to store ``context`` as an instance variable so that\n it can be accessed by other methods of the trial class. This can also be a\n convenient place to initialize other state that is shared between methods.\n \"\"\"\n self.context = context\n\n @abstractmethod\n def build_model(self) -> tf.keras.models.Model:\n \"\"\"\n Returns the deep learning architecture associated with a trial. The\n architecture might depend on the current values of the model's\n hyperparameters, which can be accessed via :func:`context.get_hparam()\n <determined.TrialContext.get_hparam>`. This function returns a\n ``tf.keras.Model`` object.\n\n After constructing the ``tf.keras.Model`` object, users **must** do two\n things before returning it:\n\n 1. Wrap the model using :meth:`context.wrap_model()\n <determined.keras.TFKerasTrialContext.wrap_model>`.\n\n 2. 
Compile the model using ``model.compile()``.\n \"\"\"\n pass\n\n @abstractmethod\n def build_training_data_loader(self) -> keras.InputData:\n \"\"\"\n Defines the data loader to use during training.\n\n Should return one of the following:\n 1) A tuple ``(x_train, y_train)``, where ``x_train`` is a NumPy array\n (or array-like), a list of arrays (in case the model has multiple inputs), or\n a dict mapping input names to the corresponding array, if the model has named inputs.\n ``y_train`` should be a NumPy array.\n\n 2) A tuple ``(x_train, y_train, sample_weights)``\n of NumPy arrays.\n\n 3) A `tf.data.Dataset\n <https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/data/Dataset>`__ returning\n a tuple of either ``(inputs, targets)`` or ``(inputs, targets, sample_weights)``.\n\n 4) A `keras.utils.Sequence\n <https://tensorflow.org/api_docs/python/tf/keras/utils/Sequence>`__ returning a tuple\n of either ``(inputs, targets)`` or ``(inputs, targets, sample weights)``.\n\n When using ``tf.data.Dataset``, you must wrap the dataset using\n :meth:`determined.keras.TFKerasTrialContext.wrap_dataset`. This wrapper is used\n to shard the dataset for distributed training. For optimal performance, users\n should wrap a dataset immediately after creating it.\n\n .. warning::\n If you are using ``tf.data.Dataset``, Determined’s support for\n automatically checkpointing the dataset does not currently work correctly.\n This means that resuming workloads will start from the beginning of the dataset\n if using ``tf.data.Dataset``.\n \"\"\"\n pass\n\n @abstractmethod\n def build_validation_data_loader(self) -> keras.InputData:\n \"\"\"\n Defines the data loader to use during validation.\n\n Should return one of the following:\n 1) A tuple ``(x_val, y_val)``, where ``x_val`` is a NumPy array\n (or array-like), a list of arrays (in case the model has multiple inputs), or\n a dict mapping input names to the corresponding array, if the model has named inputs.\n ``y_val`` should be a NumPy array.\n\n 2) A tuple ``(x_val, y_val, sample_weights)``\n of NumPy arrays.\n\n 3) A `tf.data.Dataset\n <https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/data/Dataset>`__ returning\n a tuple of either ``(inputs, targets)`` or ``(inputs, targets, sample_weights)``.\n\n 4) A `keras.utils.Sequence\n <https://tensorflow.org/api_docs/python/tf/keras/utils/Sequence>`__ returning a tuple\n of either ``(inputs, targets)`` or ``(inputs, targets, sample weights)``.\n\n When using ``tf.data.Dataset``, you must wrap the dataset using\n :meth:`determined.keras.TFKerasTrialContext.wrap_dataset`. This wrapper is used\n to shard the dataset for distributed training. For optimal performance, users\n should wrap a dataset immediately after creating it.\n \"\"\"\n pass\n\n def session_config(self) -> tf.compat.v1.ConfigProto:\n \"\"\"\n Specifies the `tf.ConfigProto\n <https://www.tensorflow.org/api_docs/python/tf/compat/v1/ConfigProto>`__ to be\n used by the TensorFlow session. By default,\n ``tf.ConfigProto(allow_soft_placement=True)`` is used.\n \"\"\"\n return tf.compat.v1.ConfigProto(allow_soft_placement=True)\n\n def keras_callbacks(self) -> List[tf.keras.callbacks.Callback]:\n \"\"\"\n Specifies a list of :class:`determined.keras.callbacks.Callback` objects to be used during\n training.\n\n Callbacks should avoid calling ``model.predict()``, as this will affect Determined training\n behavior.\n\n .. 
note:\n Note that :class:`determined.keras.callbacks.Callback` is a subclass of\n `tf.keras.callback.Callback\n <https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/Callback>`__ objects\n which supports stateful callbacks that can be checkpointed an restored mid-training.\n\n Please see :class:`determined.keras.callbacks.Callback` for a summary of differences\n between normal Keras callbacks and Determined Keras callbacks.\n\n .. warning:\n For legacy callbacks which do not subclass :class:`determined.keras.callbacks.Callback`,\n if ``records_per_epoch`` is not set in the experiement config for an experiment,\n ``on_epoch_end`` will never be called.\n \"\"\"\n return []\n" ]
[ [ "tensorflow.python.keras.saving.hdf5_format.load_optimizer_weights_from_hdf5_group", "tensorflow.compat.v1.ConfigProto", "tensorflow.executing_eagerly", "tensorflow.compat.v1.get_default_graph", "numpy.random.seed", "tensorflow.config.experimental.set_memory_growth", "tensorflow._tf2.enabled", "tensorflow.compat.v1.keras.backend.set_session", "tensorflow.compat.v1.keras.backend.get_session", "tensorflow.config.experimental.list_physical_devices", "tensorflow.python.keras.saving.hdf5_format.save_optimizer_weights_to_hdf5_group", "tensorflow.python.tf2.enabled", "tensorflow.python.keras.callbacks.make_logs", "tensorflow.python.keras.callbacks.set_callback_parameters", "tensorflow.compat.v1.set_random_seed", "tensorflow.compat.v1.reset_default_graph", "tensorflow.config.experimental.set_visible_devices", "tensorflow.python.keras.callbacks.CallbackList" ] ]
ShenYujun/genforce
[ "2ad04974cfaeba20b93c806531f987f06cc5c328" ]
[ "converters/stylegan2ada_tf_official/dnnlib/tflib/autosummary.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Helper for adding automatically tracked values to Tensorboard.\n\nAutosummary creates an identity op that internally keeps track of the input\nvalues and automatically shows up in TensorBoard. The reported value\nrepresents an average over input components. The average is accumulated\nconstantly over time and flushed when save_summaries() is called.\n\nNotes:\n- The output tensor must be used as an input for something else in the\n graph. Otherwise, the autosummary op will not get executed, and the average\n value will not get accumulated.\n- It is perfectly fine to include autosummaries with the same name in\n several places throughout the graph, even if they are executed concurrently.\n- It is ok to also pass in a python scalar or numpy array. In this case, it\n is added to the average immediately.\n\"\"\"\n\nfrom collections import OrderedDict\nimport numpy as np\nimport tensorflow as tf\nfrom tensorboard import summary as summary_lib\nfrom tensorboard.plugins.custom_scalar import layout_pb2\n\nfrom . import tfutil\nfrom .tfutil import TfExpression\nfrom .tfutil import TfExpressionEx\n\n# Enable \"Custom scalars\" tab in TensorBoard for advanced formatting.\n# Disabled by default to reduce tfevents file size.\nenable_custom_scalars = False\n\n_dtype = tf.float64\n_vars = OrderedDict() # name => [var, ...]\n_immediate = OrderedDict() # name => update_op, update_value\n_finalized = False\n_merge_op = None\n\n\ndef _create_var(name: str, value_expr: TfExpression) -> TfExpression:\n \"\"\"Internal helper for creating autosummary accumulators.\"\"\"\n assert not _finalized\n name_id = name.replace(\"/\", \"_\")\n v = tf.cast(value_expr, _dtype)\n\n if v.shape.is_fully_defined():\n size = np.prod(v.shape.as_list())\n size_expr = tf.constant(size, dtype=_dtype)\n else:\n size = None\n size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))\n\n if size == 1:\n if v.shape.ndims != 0:\n v = tf.reshape(v, [])\n v = [size_expr, v, tf.square(v)]\n else:\n v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]\n v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))\n\n with tfutil.absolute_name_scope(\"Autosummary/\" + name_id), tf.control_dependencies(None):\n var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]\n update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))\n\n if name in _vars:\n _vars[name].append(var)\n else:\n _vars[name] = [var]\n return update_op\n\n\ndef autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None, condition: TfExpressionEx = True) -> TfExpressionEx:\n \"\"\"Create a new autosummary.\n\n Args:\n name: Name to use in TensorBoard\n value: TensorFlow expression or python value to track\n passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.\n\n Example use of the passthru mechanism:\n\n n = autosummary('l2loss', loss, passthru=n)\n\n This is a shorthand for the following code:\n\n with 
tf.control_dependencies([autosummary('l2loss', loss)]):\n n = tf.identity(n)\n \"\"\"\n tfutil.assert_tf_initialized()\n name_id = name.replace(\"/\", \"_\")\n\n if tfutil.is_tf_expression(value):\n with tf.name_scope(\"summary_\" + name_id), tf.device(value.device):\n condition = tf.convert_to_tensor(condition, name='condition')\n update_op = tf.cond(condition, lambda: tf.group(_create_var(name, value)), tf.no_op)\n with tf.control_dependencies([update_op]):\n return tf.identity(value if passthru is None else passthru)\n\n else: # python scalar or numpy array\n assert not tfutil.is_tf_expression(passthru)\n assert not tfutil.is_tf_expression(condition)\n if condition:\n if name not in _immediate:\n with tfutil.absolute_name_scope(\"Autosummary/\" + name_id), tf.device(None), tf.control_dependencies(None):\n update_value = tf.placeholder(_dtype)\n update_op = _create_var(name, update_value)\n _immediate[name] = update_op, update_value\n update_op, update_value = _immediate[name]\n tfutil.run(update_op, {update_value: value})\n return value if passthru is None else passthru\n\n\ndef finalize_autosummaries() -> None:\n \"\"\"Create the necessary ops to include autosummaries in TensorBoard report.\n Note: This should be done only once per graph.\n \"\"\"\n global _finalized\n tfutil.assert_tf_initialized()\n\n if _finalized:\n return None\n\n _finalized = True\n tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])\n\n # Create summary ops.\n with tf.device(None), tf.control_dependencies(None):\n for name, vars_list in _vars.items():\n name_id = name.replace(\"/\", \"_\")\n with tfutil.absolute_name_scope(\"Autosummary/\" + name_id):\n moments = tf.add_n(vars_list)\n moments /= moments[0]\n with tf.control_dependencies([moments]): # read before resetting\n reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]\n with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting\n mean = moments[1]\n std = tf.sqrt(moments[2] - tf.square(moments[1]))\n tf.summary.scalar(name, mean)\n if enable_custom_scalars:\n tf.summary.scalar(\"xCustomScalars/\" + name + \"/margin_lo\", mean - std)\n tf.summary.scalar(\"xCustomScalars/\" + name + \"/margin_hi\", mean + std)\n\n # Setup layout for custom scalars.\n layout = None\n if enable_custom_scalars:\n cat_dict = OrderedDict()\n for series_name in sorted(_vars.keys()):\n p = series_name.split(\"/\")\n cat = p[0] if len(p) >= 2 else \"\"\n chart = \"/\".join(p[1:-1]) if len(p) >= 3 else p[-1]\n if cat not in cat_dict:\n cat_dict[cat] = OrderedDict()\n if chart not in cat_dict[cat]:\n cat_dict[cat][chart] = []\n cat_dict[cat][chart].append(series_name)\n categories = []\n for cat_name, chart_dict in cat_dict.items():\n charts = []\n for chart_name, series_names in chart_dict.items():\n series = []\n for series_name in series_names:\n series.append(layout_pb2.MarginChartContent.Series(\n value=series_name,\n lower=\"xCustomScalars/\" + series_name + \"/margin_lo\",\n upper=\"xCustomScalars/\" + series_name + \"/margin_hi\"))\n margin = layout_pb2.MarginChartContent(series=series)\n charts.append(layout_pb2.Chart(title=chart_name, margin=margin))\n categories.append(layout_pb2.Category(title=cat_name, chart=charts))\n layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))\n return layout\n\ndef save_summaries(file_writer, global_step=None):\n \"\"\"Call FileWriter.add_summary() with all summaries in the default graph,\n automatically finalizing and merging 
them on the first call.\n \"\"\"\n global _merge_op\n tfutil.assert_tf_initialized()\n\n if _merge_op is None:\n layout = finalize_autosummaries()\n if layout is not None:\n file_writer.add_summary(layout)\n with tf.device(None), tf.control_dependencies(None):\n _merge_op = tf.summary.merge_all()\n\n file_writer.add_summary(_merge_op.eval(), global_step)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.device", "tensorflow.is_finite", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.is_variable_initialized", "tensorflow.add_n", "tensorflow.summary.scalar", "tensorflow.assign_add", "tensorflow.name_scope", "tensorflow.square", "tensorflow.shape", "tensorflow.identity", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.constant", "tensorflow.reshape", "tensorflow.assign" ] ]
YashBit/generalized-hindsight
[ "2ba2e1ba0caa20b71d01c8ad70fbc7f47d61bd6c" ]
[ "rlkit/torch/sac/sac_gher.py" ]
[ "from collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch import nn as nn\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_rl_algorithm import TorchTrainer\n\n\nclass SACTrainer(TorchTrainer):\n def __init__(\n self,\n env,\n policy,\n qf1,\n qf2,\n target_qf1,\n target_qf2,\n\n discount=0.99,\n reward_scale=1.0,\n policy_lr=1e-3,\n qf_lr=1e-3,\n optimizer_class=optim.Adam,\n\n soft_target_tau=1e-2,\n target_update_period=1,\n plotter=None,\n render_eval_paths=False,\n\n use_automatic_entropy_tuning=True,\n target_entropy=None,\n ):\n super().__init__()\n self.env = env\n self.policy = policy\n self.qf1 = qf1\n self.qf2 = qf2\n self.target_qf1 = target_qf1\n self.target_qf2 = target_qf2\n self.soft_target_tau = soft_target_tau\n self.target_update_period = target_update_period\n\n self.use_automatic_entropy_tuning = use_automatic_entropy_tuning\n if self.use_automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = -np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas\n self.log_alpha = ptu.zeros(1, requires_grad=True)\n self.alpha_optimizer = optimizer_class(\n [self.log_alpha],\n lr=policy_lr,\n )\n\n self.plotter = plotter\n self.render_eval_paths = render_eval_paths\n\n self.qf_criterion = nn.MSELoss()\n self.vf_criterion = nn.MSELoss()\n\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n )\n self.qf1_optimizer = optimizer_class(\n self.qf1.parameters(),\n lr=qf_lr,\n )\n self.qf2_optimizer = optimizer_class(\n self.qf2.parameters(),\n lr=qf_lr,\n )\n\n self.discount = discount\n self.reward_scale = reward_scale\n self.eval_statistics = OrderedDict()\n self._n_train_steps_total = 0\n self._need_to_update_eval_statistics = True\n\n def train_from_torch(self, batch):\n rewards = batch['rewards']\n terminals = batch['terminals']\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n latents = batch['latents']\n\n # obs = torch.cat([obs, latents], dim=1)\n # next_obs = torch.cat([next_obs, latents], dim=1)\n\n print(f\"VALUES IN SAC_GHER\")\n print(f\"Rewards: {rewards}, latents: {latents}\")\n print(f\"Shape of rewards: {rewards.shape}. 
obs shape: {obs.shape}, latents: {latents.shape}\")\n\n \"\"\"\n Policy and Alpha Loss\n \"\"\"\n new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(\n obs, latents, reparameterize=True, return_log_prob=True\n )\n if self.use_automatic_entropy_tuning:\n alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n alpha = self.log_alpha.exp()\n else:\n alpha_loss = 0\n alpha = 1\n\n q_new_actions = torch.min(\n self.qf1(obs, new_obs_actions, latents),\n self.qf2(obs, new_obs_actions, latents),\n )\n policy_loss = (alpha * log_pi - q_new_actions).mean()\n\n \"\"\"\n QF Loss\n \"\"\"\n q1_pred = self.qf1(obs, actions, latents)\n q2_pred = self.qf2(obs, actions, latents)\n # Make sure policy accounts for squashing functions like tanh correctly!\n new_next_actions, _, _, new_log_pi, *_ = self.policy(\n next_obs, latents, reparameterize=True, return_log_prob=True\n )\n target_q_values = torch.min(\n self.target_qf1(next_obs, new_next_actions, latents),\n self.target_qf2(next_obs, new_next_actions, latents),\n ) - alpha * new_log_pi\n\n q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values\n qf1_loss = self.qf_criterion(q1_pred, q_target.detach())\n qf2_loss = self.qf_criterion(q2_pred, q_target.detach())\n\n \"\"\"\n Update networks\n \"\"\"\n self.policy_optimizer.zero_grad()\n policy_loss.backward()\n self.policy_optimizer.step()\n\n self.qf1_optimizer.zero_grad()\n qf1_loss.backward()\n self.qf1_optimizer.step()\n\n self.qf2_optimizer.zero_grad()\n qf2_loss.backward()\n self.qf2_optimizer.step()\n\n \"\"\"\n Soft Updates\n \"\"\"\n if self._n_train_steps_total % self.target_update_period == 0:\n ptu.soft_update_from_to(\n self.qf1, self.target_qf1, self.soft_target_tau\n )\n ptu.soft_update_from_to(\n self.qf2, self.target_qf2, self.soft_target_tau\n )\n\n \"\"\"\n Save some statistics for eval\n \"\"\"\n if self._need_to_update_eval_statistics:\n self._need_to_update_eval_statistics = False\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n policy_loss = (log_pi - q_new_actions).mean()\n\n self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))\n self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))\n self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q1 Predictions',\n ptu.get_numpy(q1_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q2 Predictions',\n ptu.get_numpy(q2_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Targets',\n ptu.get_numpy(q_target),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Log Pis',\n ptu.get_numpy(log_pi),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy mu',\n ptu.get_numpy(policy_mean),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy log std',\n ptu.get_numpy(policy_log_std), \n ))\n if self.use_automatic_entropy_tuning:\n self.eval_statistics['Alpha'] = alpha.item()\n self.eval_statistics['Alpha Loss'] = alpha_loss.item()\n self._n_train_steps_total += 1\n\n def get_diagnostics(self):\n return self.eval_statistics\n\n def end_epoch(self, epoch):\n self._need_to_update_eval_statistics = True\n\n @property\n def networks(self):\n return [\n self.policy,\n self.qf1,\n self.qf2,\n self.target_qf1,\n 
self.target_qf2,\n ]\n\n def get_snapshot(self):\n return dict(\n policy=self.policy,\n qf1=self.qf1,\n qf2=self.qf2,\n target_qf1=self.qf1,\n target_qf2=self.qf2,\n )\n" ]
[ [ "torch.nn.MSELoss", "numpy.prod" ] ]
apacha/Mensural-Detector
[ "d924a651bca5ccb97c7b45861b9ef5ef6e4cb26e" ]
[ "position_classification/train_model.py" ]
[ "import argparse\nimport datetime\nimport os\nfrom time import time\n\nimport keras\nimport numpy\nimport numpy as np\nfrom IPython import get_ipython\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator, DirectoryIterator\nfrom keras_tqdm import TQDMCallback, TQDMNotebookCallback\n\nfrom position_classification.reporting import sklearn_reporting\nfrom position_classification.reporting.TrainingHistoryPlotter import TrainingHistoryPlotter\n\nfrom position_classification.ClassWeightCalculator import ClassWeightCalculator\nfrom position_classification.models.ConfigurationFactory import ConfigurationFactory\n\n\ndef running_inside_jupyter_notebook():\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return True # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter\n\n\ndef train_model(dataset_directory: str, model_name: str,\n width: int, height: int, output_name: str = None):\n print(\"Loading configuration and data-readers...\")\n start_time = time()\n\n number_of_classes = len(os.listdir(os.path.join(dataset_directory, \"training\")))\n training_configuration = ConfigurationFactory.get_configuration_by_name(model_name, width, height,\n number_of_classes)\n\n train_generator = ImageDataGenerator(rotation_range=training_configuration.rotation_range,\n zoom_range=training_configuration.zoom_range)\n training_data_generator = DirectoryIterator(\n directory=os.path.join(dataset_directory, \"training\"),\n image_data_generator=train_generator,\n target_size=(training_configuration.input_image_rows,\n training_configuration.input_image_columns),\n batch_size=training_configuration.training_minibatch_size,\n )\n training_steps_per_epoch = np.math.ceil(training_data_generator.samples / training_data_generator.batch_size)\n\n validation_generator = ImageDataGenerator()\n validation_data_generator = DirectoryIterator(\n directory=os.path.join(dataset_directory, \"validation\"),\n image_data_generator=validation_generator,\n target_size=(\n training_configuration.input_image_rows,\n training_configuration.input_image_columns),\n batch_size=training_configuration.training_minibatch_size)\n validation_steps_per_epoch = np.math.ceil(validation_data_generator.samples / validation_data_generator.batch_size)\n\n test_generator = ImageDataGenerator()\n test_data_generator = DirectoryIterator(\n directory=os.path.join(dataset_directory, \"test\"),\n image_data_generator=test_generator,\n target_size=(training_configuration.input_image_rows,\n training_configuration.input_image_columns),\n batch_size=training_configuration.training_minibatch_size,\n shuffle=False)\n test_steps_per_epoch = np.math.ceil(test_data_generator.samples / test_data_generator.batch_size)\n\n model = training_configuration.classifier()\n model.summary()\n\n print(\"Model {0} loaded.\".format(training_configuration.name()))\n print(training_configuration.summary())\n\n start_of_training = datetime.date.today()\n if output_name is None:\n best_model_path = \"{0}_{1}.h5\".format(start_of_training, training_configuration.name())\n else:\n best_model_path = \"{0}.h5\".format(output_name)\n\n monitor_variable = 'val_acc'\n\n model_checkpoint = ModelCheckpoint(best_model_path, monitor=monitor_variable, save_best_only=True, 
verbose=1)\n early_stop = EarlyStopping(monitor=monitor_variable,\n patience=training_configuration.number_of_epochs_before_early_stopping,\n verbose=1)\n learning_rate_reduction = ReduceLROnPlateau(monitor=monitor_variable,\n patience=training_configuration.number_of_epochs_before_reducing_learning_rate,\n verbose=1,\n factor=training_configuration.learning_rate_reduction_factor,\n min_lr=training_configuration.minimum_learning_rate)\n if output_name is None:\n log_directory = \"./logs/{0}_{1}/\".format(start_of_training, training_configuration.name())\n else:\n log_directory = \"./logs/{0}/\".format(output_name)\n\n tensorboard_callback = TensorBoard(\n log_dir=log_directory,\n batch_size=training_configuration.training_minibatch_size)\n\n if running_inside_jupyter_notebook():\n callbacks = [model_checkpoint, early_stop, tensorboard_callback, learning_rate_reduction,\n TQDMNotebookCallback()]\n else:\n callbacks = [model_checkpoint, early_stop, tensorboard_callback, learning_rate_reduction, TQDMCallback()]\n\n class_weight_calculator = ClassWeightCalculator()\n balancing_method = \"skBalance\"\n class_weights = class_weight_calculator.calculate_class_weights(dataset_directory,\n method=balancing_method,\n class_indices=training_data_generator.class_indices)\n if balancing_method is not None:\n print(\"Using {0} method for obtaining class weights to compensate for an unbalanced dataset.\".format(\n balancing_method))\n\n print(\"Training on dataset...\")\n history = model.fit_generator(\n generator=training_data_generator,\n steps_per_epoch=training_steps_per_epoch,\n epochs=training_configuration.number_of_epochs,\n callbacks=callbacks,\n validation_data=validation_data_generator,\n validation_steps=validation_steps_per_epoch,\n class_weight=class_weights,\n verbose=0\n )\n\n print(\"Loading best model from check-point and testing...\")\n best_model = keras.models.load_model(best_model_path)\n\n test_data_generator.reset()\n file_names = test_data_generator.filenames\n class_labels = os.listdir(os.path.join(dataset_directory, \"test\"))\n # Notice that some classes have so few elements, that they are not present in the test-set and do not\n # appear in the final report. 
To obtain the correct classes, we have to enumerate all non-empty class\n # directories inside the test-folder and use them as labels\n names_of_classes_with_test_data = [\n class_name for class_name in class_labels\n if os.listdir(os.path.join(dataset_directory, \"test\", class_name))]\n true_classes = test_data_generator.classes\n predictions = best_model.predict_generator(test_data_generator, steps=test_steps_per_epoch)\n predicted_classes = numpy.argmax(predictions, axis=1)\n\n test_data_generator.reset()\n evaluation = best_model.evaluate_generator(test_data_generator, steps=test_steps_per_epoch)\n classification_accuracy = 0\n\n print(\"Reporting classification statistics with micro average\")\n report = sklearn_reporting.classification_report(true_classes, predicted_classes, digits=3,\n target_names=names_of_classes_with_test_data, average='micro')\n print(report)\n\n print(\"Reporting classification statistics with macro average\")\n report = sklearn_reporting.classification_report(true_classes, predicted_classes, digits=3,\n target_names=names_of_classes_with_test_data, average='macro')\n print(report)\n\n print(\"Reporting classification statistics with weighted average\")\n report = sklearn_reporting.classification_report(true_classes, predicted_classes, digits=3,\n target_names=names_of_classes_with_test_data, average='weighted'\n )\n print(report)\n\n indices_of_misclassified_files = [i for i, e in enumerate(true_classes - predicted_classes) if e != 0]\n misclassified_files = [file_names[i] for i in indices_of_misclassified_files]\n misclassified_files_actual_prediction_indices = [predicted_classes[i] for i in indices_of_misclassified_files]\n misclassified_files_actual_prediction_classes = [class_labels[i] for i in\n misclassified_files_actual_prediction_indices]\n print(\"Misclassified files:\")\n for i in range(len(misclassified_files)):\n print(\"\\t{0} is incorrectly classified as {1}\".format(misclassified_files[i],\n misclassified_files_actual_prediction_classes[i]))\n\n for i in range(len(best_model.metrics_names)):\n current_metric = best_model.metrics_names[i]\n print(\"{0}: {1:.5f}\".format(current_metric, evaluation[i]))\n if current_metric == 'acc' or current_metric == 'output_class_acc':\n classification_accuracy = evaluation[i]\n print(\"Total Accuracy: {0:0.5f}%\".format(classification_accuracy * 100))\n print(\"Total Error: {0:0.5f}%\".format((1 - classification_accuracy) * 100))\n\n end_time = time()\n print(\"Execution time: {0:.1f}s\".format(end_time - start_time))\n training_result_image = \"{1}_{0}_{2:.1f}p.png\".format(training_configuration.name(), start_of_training,\n classification_accuracy * 100)\n TrainingHistoryPlotter.plot_history(history, training_result_image)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument(\"--dataset_directory\", type=str, default=\"data\",\n help=\"The directory, that is used for storing the images during training\")\n parser.add_argument(\"--model_name\", type=str, default=\"inception_resnet_v2\",\n help=\"The model used for training the network. Run ListAvailableConfigurations.ps1 or \"\n \"models/ConfigurationFactory.py to get a list of all available configurations\")\n parser.add_argument(\"--output_name\", type=str, default=None, required=False,\n help=\"An optional name of the output file, that should be used. 
If non specified, an automatic name will be assigned with the timestamp and selected model.\")\n parser.add_argument(\"--width\", default=224, type=int, help=\"Width of the input-images for the network in pixel\")\n parser.add_argument(\"--height\", default=448, type=int, help=\"Height of the input-images for the network in pixel\")\n\n flags, unparsed = parser.parse_known_args()\n\n # Use these lines to restrict execution to only use 40% of the GPU's RAM\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)\n # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n train_model(dataset_directory=flags.dataset_directory,\n model_name=flags.model_name,\n width=flags.width,\n height=flags.height,\n output_name=flags.output_name)\n" ]
[ [ "numpy.argmax", "numpy.math.ceil" ] ]
fmarberg/optking
[ "c50f3f89bcdbb66f779be57277fcd91a74ad677e" ]
[ "optking/displace.py" ]
[ "import numpy as np\nimport logging\n\nfrom . import intcosMisc\nfrom .exceptions import AlgError, OptError\nfrom . import optparams as op\nfrom .linearAlgebra import abs_max, rms, symm_mat_inv\n\n# Functions in this file displace.py\n#\n# displace_molsys: Displace each fragment. Displace dimer coordinates.\n# displace_frag : Displace a fragment by dq. Double check frozen coordinates\n# are satisfied. Reduce stepsize as needed if\n# ensure_convergence is true.\n# back_transformation: Call dq_to_dx iteratively to try to converge to desired\n# Dq as much as possible.\n# dq_to_dx : Given Delta(q), compute and invert B, take Delta(x) step.\n\n# Displace molecular system\ndef displace_molsys(oMolsys, dq, fq=None, ensure_convergence=False):\n \"\"\" Manage internal coordinate step for a molecular system\n Parameters\n ----------\n oMolsys : Molsys\n input molecular system\n dq : ndarray\n input coordinate step\n fq : forces in internal coordinates (used for printing).\n \"\"\"\n logger = logging.getLogger(__name__)\n\n q = oMolsys.q_array()\n q_target = q + dq\n # q_target is used for dimer coordinates; does it need corrected for\n # dihedrals through pi here? don't think so.\n #print('Target q')\n #print(q_target)\n geom_orig = oMolsys.geom\n\n forces = None\n for iF,F in enumerate(oMolsys._fragments):\n if F.frozen:\n logger.info(\"\\tFragment %d is frozen, so not displacing\" % (iF+1))\n continue\n \n logger.info(\"\\tDetermining Cartesian step for fragment %d.\" % (iF+1))\n conv = displace_frag(F, dq[oMolsys.frag_intco_slice(iF)], ensure_convergence)\n if conv:\n logger.info(\"\\tStep for fragment succeeded.\")\n else:\n logger.info(\"\\tStep for fragment falied.\")\n logger.warning(\"\\tStep for fragment succeeded.\")\n\n for i, DI in enumerate(oMolsys._dimer_intcos):\n logger.info(\"\\tStep for dimer coordinates for fragments %d and %d.\"\n % (DI.a_idx + 1, DI.b_idx + 1))\n Axyz = oMolsys.frag_geom(DI.a_idx)\n Bxyz = oMolsys.frag_geom(DI.b_idx)\n Bxyz[:] = DI.orient_fragment(Axyz, Bxyz,\n q_target[oMolsys.dimerfrag_intco_slice(i)])\n\n geom_final = oMolsys.geom\n # Analyze relative to original input geometry\n oMolsys.geom = geom_orig\n oMolsys.update_dihedral_orientations()\n oMolsys.fix_bend_axes()\n q_orig = oMolsys.q_array()\n qShow_orig = oMolsys.q_show_array()\n\n oMolsys.geom = geom_final\n q_final = oMolsys.q_array()\n qShow_final = oMolsys.q_show_array()\n\n # Set dq to final, total displacement ACHIEVED\n dq[:] = q_final - q_orig\n dqShow = qShow_final - qShow_orig\n oMolsys.unfix_bend_axes()\n\n coordinate_change_report = (\n \"\\n\\n\\t --- Internal Coordinate Step in ANG or DEG, aJ/ANG or AJ/DEG ---\\n\")\n coordinate_change_report += (\n \"\\t-----------------------------------------------------------------------------\\n\")\n\n if type(fq) == type(None):\n coordinate_change_report += (\n \"\\t Coordinate Previous Change New \\n\")\n coordinate_change_report += (\n \"\\t ---------- -------- ------ ------\\n\")\n for i in range(len(dq)):\n coordinate_change_report += (\"\\t%19s%14.5f%14.5f%14.5f\\n\"\n % (i, qShow_orig[i], dqShow[i], qShow_final[i]))\n else:\n coordinate_change_report += (\n \"\\t Coordinate Previous Force Change New \\n\")\n coordinate_change_report += (\n \"\\t ---------- -------- ------ ------ ------\\n\")\n for i in range(len(dq)):\n coordinate_change_report += (\"\\t%19s%14.5f%14.5f%14.5f%14.5f\\n\"\n % (i, qShow_orig[i], fq[i], dqShow[i], qShow_final[i]))\n coordinate_change_report += (\n 
\"\\t-----------------------------------------------------------------------------\\n\")\n logger.info(coordinate_change_report)\n\n return\n\n\ndef displace_frag(F, dq, ensure_convergence=False):\n \"\"\" Converts internal coordinate step into the new cartesian geometry\n Parameters\n ----------\n F : Fragment\n dq : ndarray\n step (displacement) in internal coordiantes\n overriden to actual displacements performed\n ensure_convergence : bool\n reduce the magntitude of the step size as necessary until the\n iterative back-transformation actually converges.\n \"\"\"\n logger = logging.getLogger(__name__)\n geom = F.geom\n if not F.num_intcos or not len(geom) or not len(dq):\n dq[:] = 0\n return\n\n geom_orig = np.copy(geom)\n dq_orig = np.copy(dq)\n q_orig = F.q_array()\n\n best_geom = np.zeros(geom_orig.shape)\n conv = False # is back-transformation converged?\n\n if ensure_convergence:\n cnt = -1\n\n while not conv:\n cnt += 1\n if cnt > 0:\n logger.info(\"\\tReducing step-size by a factor of %d.\" % (2 * cnt))\n dq[:] = dq_orig / (2.0 * cnt)\n\n F.fix_bend_axes()\n F.update_dihedral_orientations()\n conv = back_transformation(F.intcos, geom, dq, op.Params.print_lvl)\n F.unfix_bend_axes()\n\n if not conv:\n if cnt == 5:\n logger.warning(\n \"\\tUnable to back-transform even 1/10th of the desired step rigorously.\"\n + \"\\tContinuing with best (small) step\")\n break\n else:\n geom[:] = geom_orig # put original geometry back for next try at smaller step.\n\n if conv and cnt > 0: # We were able to take a modest step. Try to complete it.\n logger.info(\n \"\\tAble to take a small step; trying another partial back-transformations.\\n\")\n\n for j in range(1, 2 * cnt):\n logger.info(\"\\tMini-step %d of %d.\\n\", (j + 1, 2 * cnt))\n dq[:] = dq_orig / (2 * cnt)\n\n best_geom[:] = geom\n\n F.fix_bend_axes()\n conv = back_transformation(F.intcos, geom, dq, op.Params.print_lvl)\n F.unfix_bend_axes()\n\n if not conv:\n logger.warning(\n \"\\tCouldn't converge this mini-step; quitting with previous geometry.\\n\")\n geom[:] = best_geom\n break\n\n else: # try to back-transform, but continue even if desired dq is not achieved\n F.fix_bend_axes()\n F.update_dihedral_orientations()\n conv = back_transformation(F.intcos, geom, dq, op.Params.print_lvl)\n F.unfix_bend_axes()\n\n if op.Params.opt_type == \"IRC\" and not conv:\n raise OptError(\"Could not take constrained step in an IRC computation.\")\n\n # Fix drift/error in any frozen coordinates\n frozen_conv = True\n if any(intco.frozen for intco in F.intcos):\n\n # Set dq for unfrozen intcos to zero.\n F.update_dihedral_orientations()\n F.fix_bend_axes()\n dq_adjust_frozen = q_orig - intcosMisc.q_values(F.intcos, geom)\n\n for i, intco in enumerate(F.intcos):\n if not intco.frozen:\n dq_adjust_frozen[i] = 0\n\n frozen_msg = (\n \"\\tAdditional back-transformation to adjust frozen coordinates: \")\n\n frozen_conv = back_transformation(F.intcos, geom,\n dq_adjust_frozen,\n op.Params.print_lvl - 1, # suppress printing\n bt_dx_conv=1.0e-12,\n bt_dx_rms_change_conv=1.0e-12,\n bt_max_iter=100)\n\n F.unfix_bend_axes()\n\n if frozen_conv:\n frozen_msg += \"successful.\\n\"\n logger.info(frozen_msg)\n else:\n frozen_msg += \"unsuccessful, but continuing.\\n\"\n logger.info(frozen_msg)\n logger.warning(frozen_msg)\n\n # Make sure final Dq is actual change\n q_final = intcosMisc.q_values(F.intcos, geom)\n dq[:] = q_final - q_orig\n\n if op.Params.print_lvl >= 1:\n frag_report = \"\\tReport of back-transformation: (au)\\n\"\n frag_report += \"\\n\\t int q_final 
q_target Error\\n\"\n frag_report += \"\\t---------------------------------------------------\\n\"\n q_target = q_orig + dq_orig\n for i in range(F.num_intcos):\n frag_report += (\"\\t%5d%15.10lf%15.10f%15.10lf\\n\"\n % (i + 1, q_final[i], q_target[i], (q_final - q_target)[i]))\n frag_report += \"\\t--------------------------------------------------\\n\"\n logger.debug(frag_report)\n\n return conv and frozen_conv\n\n\ndef back_transformation(intcos, geom, dq, print_lvl,\n bt_dx_conv=None, bt_dx_rms_change_conv=None, bt_max_iter=None):\n\n logger = logging.getLogger(__name__)\n dx_rms_last = -1\n if bt_dx_conv is None:\n bt_dx_conv = op.Params.bt_dx_conv\n if bt_dx_rms_change_conv is None:\n bt_dx_rms_change_conv = op.Params.bt_dx_rms_change_conv\n if bt_max_iter is None:\n bt_max_iter = op.Params.bt_max_iter\n\n q_orig = intcosMisc.q_values(intcos, geom)\n q_target = q_orig + dq\n\n if print_lvl > 1:\n target_step_str = \"Back-transformation in back_transformation():\\n\"\n target_step_str += \" Original Target Dq\\n\"\n for i in range(len(dq)):\n target_step_str += \"%15.10f%15.10f%15.10f\\n\" % (q_orig[i], q_target[i], dq[i])\n logger.info(target_step_str)\n\n if print_lvl > 0:\n step_iter_str = \"\\n\\n\\t---------------------------------------------------\\n\"\n step_iter_str += \"\\t Iter RMS(dx) Max(dx) RMS(dq) \\n\"\n step_iter_str += \"\\t---------------------------------------------------\\n\"\n\n new_geom = np.copy(geom) # cart geometry to start each iter\n best_geom = np.zeros(new_geom.shape)\n\n bt_iter_continue = True\n bt_converged = False\n bt_iter_cnt = 0\n\n while bt_iter_continue:\n\n #dq_rms = rms(dq)\n dx_rms, dx_max = dq_to_dx(intcos, geom, dq, print_lvl > 2)\n\n # Met convergence thresholds\n if dx_rms < bt_dx_conv and dx_max < bt_dx_conv:\n bt_converged = True\n bt_iter_continue = False\n # No further progress toward convergence.\n elif (np.absolute(dx_rms - dx_rms_last) < bt_dx_rms_change_conv\n or bt_iter_cnt >= bt_max_iter or dx_rms > 100.0):\n bt_converged = False\n bt_iter_continue = False\n\n dx_rms_last = dx_rms\n\n new_q = intcosMisc.q_values(intcos, geom)\n dq[:] = q_target - new_q\n del new_q\n\n dq_rms = rms(dq)\n if bt_iter_cnt == 0 or dq_rms < best_dq_rms: # short circuit evaluation\n best_geom[:] = geom\n best_dq_rms = dq_rms\n\n if print_lvl > 0:\n step_iter_str += (\"\\t%5d %14.1e %14.1e %14.1e\\n\"\n % (bt_iter_cnt + 1, dx_rms, dx_max, dq_rms))\n bt_iter_cnt += 1\n\n if print_lvl > 0:\n step_iter_str += \"\\t---------------------------------------------------\\n\"\n logger.info(step_iter_str)\n\n if bt_converged:\n logger.info(\"\\tSuccessfully converged to displaced geometry.\")\n else:\n logger.warning(\"\\tUnable to completely converge to displaced geometry.\")\n\n if dq_rms > best_dq_rms:\n logger.warning(\"\\tPrevious geometry is closer to target in internal coordinates,\"\n + \" so using that one.\\n\")\n logger.warning(\"\\tBest geometry has RMS(Delta(q)) = %8.2e\\n\" % best_dq_rms)\n geom[:] = best_geom\n\n return bt_converged\n\n\n# Convert dq to dx. Geometry is updated.\n# B dx = dq\n# B dx = (B Bt)(B Bt)^-1 dq\n# B (dx) = B * [Bt (B Bt)^-1 dq]\n# dx = Bt (B Bt)^-1 dq\n# dx = Bt G^-1 dq, where G = B B^t.\ndef dq_to_dx(intcos, geom, dq, printDetails=False):\n \"\"\" Convert dq to dx. 
Geometry is updated\n\n Parameters\n ----------\n intcos : list of Stre, Bend, Tors, or Oofp\n geom : ndarray\n cartesian geometry updated to new geometry\n dq : displacement in internal coordinates\n\n Returns\n -------\n float :\n rms of cartesian displacement\n float :\n absolute maximum of cartesian displacement\n \"\"\"\n logger = logging.getLogger(__name__)\n B = intcosMisc.Bmat(intcos, geom)\n G = np.dot(B, B.T)\n Ginv = symm_mat_inv(G, redundant=True)\n tmp_v_Nint = np.dot(Ginv, dq)\n dx = np.dot(B.T, tmp_v_Nint)\n\n if printDetails:\n qOld = intcosMisc.q_values(intcos, geom)\n\n geom += dx.reshape(geom.shape)\n\n if printDetails:\n dq_achieved = intcosMisc.q_values(intcos, geom) - qOld\n displacement_str = \"\\t Report of Single-step\\n\"\n displacement_str += \"\\t int dq_achieved deviation from target\\n\"\n for i in range(len(intcos)):\n displacement_str += \"\\t%5d%15.10f%15.10f\\n\" % (i + 1,\n dq_achieved[i], dq_achieved[i] - dq[i])\n logger.debug(displacement_str)\n\n dx_rms = rms(dx)\n dx_max = abs_max(dx)\n del B, G, Ginv, tmp_v_Nint, dx\n return dx_rms, dx_max\n" ]
[ [ "numpy.dot", "numpy.copy", "numpy.absolute", "numpy.zeros" ] ]
Kaoschuks/CrypTen
[ "c181518da7e2cec14402e64ccc0bf55743b3f765" ]
[ "test/test_gradients.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport copy\nimport itertools\nimport logging\nimport unittest\nfrom collections import namedtuple\n\nimport crypten\nimport torch\nimport torch.nn.functional as F\nfrom crypten.common.tensor_types import is_float_tensor\nfrom crypten.gradients import AutogradContext\nfrom test.multiprocess_test_case import (\n MultiProcessTestCase,\n get_random_test_tensor,\n onehot,\n)\n\n\n# Sizes for tensor operations\nSIZES = [\n (),\n (1,),\n (3,),\n (1, 1),\n (1, 3),\n (3, 1),\n (3, 3),\n (1, 1, 1),\n (1, 1, 3),\n (1, 3, 1),\n (3, 1, 1),\n (3, 3, 3),\n (1, 1, 1, 1),\n (1, 1, 3, 1),\n (3, 3, 3, 3),\n]\n\n\nclass TestGradients:\n \"\"\"\n This class tests all autograd functions implemented in gradients.py.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n # We don't want the main process (rank -1) to initialize the communicator\n if self.rank >= 0:\n crypten.init()\n\n def _check(self, encrypted_tensor, reference, msg, tolerance=None):\n if tolerance is None:\n tolerance = getattr(self, \"default_tolerance\", 0.05)\n tensor = encrypted_tensor.get_plain_text()\n\n # Check sizes match\n self.assertTrue(tensor.size() == reference.size(), msg)\n\n self.assertTrue(is_float_tensor(reference), \"reference must be a float\")\n diff = (tensor - reference).abs_()\n norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()\n test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)\n test_passed = test_passed.gt(0).all().item() == 1\n if not test_passed:\n logging.info(msg)\n logging.info(\"Result %s\" % tensor)\n logging.info(\"Result - Reference = %s\" % (tensor - reference))\n self.assertTrue(test_passed, msg=msg)\n\n def _check_forward_backward(\n self,\n func_name,\n input_tensor,\n *args,\n torch_func_name=None,\n msg=None,\n addl_args=None,\n **kwargs,\n ):\n \"\"\"Checks forward and backward against PyTorch\n\n Args:\n func_name (str): PyTorch/CrypTen function name\n input_tensor (torch.tensor): primary input\n args (list): contains arguments for function\n msg (str): additional message for mismatch\n kwargs (list): keyword arguments for function\n \"\"\"\n\n if msg is None:\n msg = f\"{func_name} grad_fn incorrect\"\n\n input = input_tensor.clone()\n input.requires_grad = True\n input_encr = crypten.cryptensor(input, requires_grad=True)\n\n crypten_kwargs = copy.deepcopy(kwargs)\n if addl_args is not None:\n for item, val in addl_args.items():\n crypten_kwargs[item] = val\n\n for private in [False, True]:\n input.grad = None\n input_encr.grad = None\n args = self._set_grad_to_zero(args)\n args_encr = self._set_grad_to_zero(list(args), make_private=private)\n\n # obtain torch function\n if torch_func_name is not None:\n torch_func = self._get_torch_func(torch_func_name)\n else:\n torch_func = self._get_torch_func(func_name)\n\n reference = torch_func(input, *args, **kwargs)\n encrypted_out = getattr(input_encr, func_name)(*args_encr, **crypten_kwargs)\n\n # extract argmax output for max / min with keepdim=False\n if isinstance(encrypted_out, (list, tuple)):\n reference = reference[0]\n encrypted_out = encrypted_out[0]\n\n self._check(encrypted_out, reference, msg + \" in forward\")\n\n # check backward pass\n grad_output = get_random_test_tensor(\n max_value=2, size=reference.size(), is_float=True\n )\n grad_output_encr = crypten.cryptensor(grad_output)\n 
reference.backward(grad_output)\n encrypted_out.backward(grad_output_encr)\n\n self._check(input_encr.grad, input.grad, msg + \" in backward\")\n for i, arg_encr in enumerate(args_encr):\n if crypten.is_encrypted_tensor(arg_encr):\n self._check(arg_encr.grad, args[i].grad, msg + \" in backward args\")\n\n def _set_grad_to_zero(self, args, make_private=False):\n \"\"\"Sets gradients for args to zero\n\n Args:\n args (list of torch.tensors): contains arguments\n make_private (bool): encrypt args using CrypTensor\n \"\"\"\n args_zero_grad = []\n\n for arg in args:\n if is_float_tensor(arg) and make_private:\n arg = crypten.cryptensor(arg, requires_grad=True)\n elif is_float_tensor(arg):\n arg.requires_grad = True\n arg.grad = None\n\n args_zero_grad.append(arg)\n\n return args_zero_grad\n\n def _get_torch_func(self, func_name):\n \"\"\"Returns PyTorch function from tensor or functional API\"\"\"\n if hasattr(torch.Tensor, func_name):\n return getattr(torch.Tensor, func_name)\n elif hasattr(F, func_name):\n return getattr(F, func_name)\n else:\n raise ValueError(\"unknown PyTorch function: %s\" % func_name)\n\n def test_arithmetic(self):\n \"\"\"Tests arithmetic functions with broadcasting.\"\"\"\n arithmetic_functions = [\"add\", \"sub\", \"mul\"]\n for func in arithmetic_functions:\n # Test on operator\n ofunc = \"__\" + func + \"__\"\n\n # Test both left functions and right functions\n rfunc = ofunc[:2] + \"r\" + ofunc[2:]\n\n # Test on both float inputs and tensor inputs\n for use_tensor in [False, True]:\n for size1 in SIZES:\n tensor1 = get_random_test_tensor(size=size1, is_float=True)\n if use_tensor:\n for size2 in SIZES:\n tensor2 = get_random_test_tensor(size=size2, is_float=True)\n self._check_forward_backward(func, tensor1, tensor2)\n self._check_forward_backward(ofunc, tensor1, tensor2)\n self._check_forward_backward(rfunc, tensor1, tensor2)\n else:\n scalar = 2.0\n self._check_forward_backward(func, tensor1, scalar)\n self._check_forward_backward(ofunc, tensor1, scalar)\n self._check_forward_backward(rfunc, tensor1, scalar)\n\n def test_div(self):\n self._div_helper(\"div\")\n\n def test_truediv(self):\n self._div_helper(\"__truediv__\")\n\n def test_rtruediv(self):\n self._div_helper(\"__rtruediv__\")\n\n def _div_helper(self, func):\n for size1 in SIZES:\n tensor1 = get_random_test_tensor(size=size1, is_float=True)\n for size2 in SIZES:\n tensor2 = get_random_test_tensor(\n min_value=0.5, size=size2, is_float=True\n ) # do not divide by value very close to zero\n if func == \"__rtruediv__\":\n # denominator is first argument for rtruediv\n self._check_forward_backward(func, tensor2, tensor1)\n else:\n self._check_forward_backward(func, tensor1, tensor2)\n\n if func == \"__rtruediv__\":\n self._check_forward_backward(func, torch.tensor(2.0), tensor2)\n else:\n self._check_forward_backward(func, tensor1, 2.0)\n\n def test_sum_mean_reductions(self):\n reductions = [\"sum\", \"mean\"]\n self._reductions_helper(reductions)\n\n def test_max_min_reductions_pairwise(self):\n reductions = [\"max\", \"min\"]\n self._reductions_helper(reductions, \"pairwise\")\n\n def test_max_min_reductions_log_reduction(self):\n reductions = [\"max\", \"min\"]\n self._reductions_helper(reductions, \"log_reduction\")\n\n def test_max_min_reductions_double_log_reduction(self):\n reductions = [\"max\", \"min\"]\n self._reductions_helper(reductions, \"double_log_reduction\")\n\n def test_max_min_reductions_accelerated_cascade(self):\n reductions = [\"max\", \"min\"]\n self._reductions_helper(reductions, 
\"accelerated_cascade\")\n\n def _reductions_helper(self, input_reductions, method=None):\n \"\"\"Tests input reductions on tensors of various sizes.\"\"\"\n for size in SIZES[: min(5, len(SIZES))]:\n tensor = get_random_test_tensor(size=size, is_float=True)\n for reduction in input_reductions:\n if method is None:\n self._check_forward_backward(reduction, tensor)\n else:\n with crypten.mpc.ConfigManager(\"max_method\", method):\n self._check_forward_backward(reduction, tensor)\n\n # Check dim 0 if tensor is 0-dimensional\n dims = 1 if tensor.dim() == 0 else tensor.dim()\n for dim in range(dims):\n\n # check when keepdim is not provided as a kwarg\n if method is None:\n self._check_forward_backward(reduction, tensor, dim=dim)\n else:\n with crypten.mpc.ConfigManager(\"max_method\", method):\n self._check_forward_backward(reduction, tensor, dim=dim)\n\n # check when keepdim is provided as a kwarg\n for keepdim in [False, True]:\n if method is None:\n self._check_forward_backward(\n reduction, tensor, dim, keepdim=keepdim\n )\n self._check_forward_backward(\n reduction, tensor, dim=dim, keepdim=keepdim\n )\n else:\n with crypten.mpc.ConfigManager(\"max_method\", method):\n self._check_forward_backward(\n reduction, tensor, dim, keepdim=keepdim\n )\n self._check_forward_backward(\n reduction, tensor, dim=dim, keepdim=keepdim\n )\n\n def test_matmul(self):\n \"\"\"Test matmul with broadcasting.\"\"\"\n matmul_sizes = [(1, 1), (1, 5), (5, 1), (5, 5)]\n batch_dims = [(), (1,), (5,), (1, 1), (1, 5), (5, 5)]\n\n matched_sizes = [\n ((1,), (1,)),\n ((10,), (10,)),\n ((10,), (10, 5)),\n ((5, 10), (10,)),\n ]\n\n matmul_funcs = [\"matmul\", \"__matmul__\", \"__imatmul__\"]\n torch_funcs = [\"matmul\", \"__matmul__\", \"__matmul__\"]\n for i, func in enumerate(matmul_funcs):\n for size in matmul_sizes:\n for batch1, batch2 in itertools.combinations(batch_dims, 2):\n size1 = (*batch1, *size)\n size2 = (*batch2, *size)\n\n tensor1 = get_random_test_tensor(size=size1, is_float=True)\n tensor2 = get_random_test_tensor(size=size2, is_float=True)\n tensor2 = tensor2.transpose(-2, -1)\n self._check_forward_backward(\n func, tensor1, tensor2, torch_func_name=torch_funcs[i]\n )\n\n for sizes in matched_sizes:\n tensor1 = get_random_test_tensor(size=sizes[0], is_float=True)\n tensor2 = get_random_test_tensor(size=sizes[1], is_float=True)\n\n self._check_forward_backward(\n func, tensor1, tensor2, torch_func_name=torch_funcs[i]\n )\n\n def test_unary_functions(self):\n \"\"\"Test unary functions on tensors of various sizes.\"\"\"\n unary_functions = [\n \"neg\",\n \"__neg__\",\n \"exp\",\n \"reciprocal\",\n \"abs\",\n \"__abs__\",\n \"sign\",\n \"relu\",\n \"sin\",\n \"cos\",\n \"sigmoid\",\n \"tanh\",\n \"log\",\n \"sqrt\",\n ]\n pos_only_functions = [\"log\", \"sqrt\"]\n for func in unary_functions:\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, is_float=True)\n\n # Make tensor positive when positive inputs are required\n if func in pos_only_functions:\n tensor = tensor.abs()\n\n self._check_forward_backward(func, tensor)\n\n def test_hardtanh(self):\n tensor = torch.arange(-10, 10, dtype=torch.float32)\n for minval in range(-10, 10):\n for maxval in range(minval, 11):\n self._check_forward_backward(\"hardtanh\", tensor, minval, maxval)\n self._check_forward_backward(\"relu6\", tensor)\n\n def test_inplace_warning(self):\n \"\"\"Tests that a warning is thrown that indicates that the `inplace` kwarg\n is ignored when a function is called with `inplace=True`\n \"\"\"\n tensor = 
get_random_test_tensor(is_float=True)\n encrypted = crypten.cryptensor(tensor)\n\n functions = [\"dropout\", \"_feature_dropout\"]\n for func in functions:\n warning_str = (\n f\"CrypTen {func} does not support inplace computation during training.\"\n )\n with self.assertLogs(logger=logging.getLogger(), level=\"WARNING\") as cm:\n getattr(encrypted, func)(inplace=True)\n self.assertTrue(f\"WARNING:root:{warning_str}\" in cm.output)\n\n def test_dot_ger(self):\n \"\"\"Test inner and outer products of encrypted tensors.\"\"\"\n for length in range(1, 10):\n tensor1 = get_random_test_tensor(size=(length,), is_float=True)\n tensor2 = get_random_test_tensor(size=(length,), is_float=True)\n\n self._check_forward_backward(\"dot\", tensor1, tensor2)\n self._check_forward_backward(\"ger\", tensor1, tensor2)\n\n def test_squeeze_unsqueeze(self):\n \"\"\"Test addition and removal of tensor dimensions\"\"\"\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, is_float=True)\n\n self._check_forward_backward(\"squeeze\", tensor)\n for dim in range(tensor.dim()):\n self._check_forward_backward(\"squeeze\", tensor, dim)\n self._check_forward_backward(\"unsqueeze\", tensor, dim)\n\n # Check unsqueeze on last dimension\n self._check_forward_backward(\"unsqueeze\", tensor, tensor.dim())\n\n def test_softmax(self):\n \"\"\"Test softmax\"\"\"\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, is_float=True)\n\n # Check dim 0 if tensor is 0-dimensional\n dims = 1 if tensor.dim() == 0 else tensor.dim()\n for dim in range(dims):\n self._check_forward_backward(\"softmax\", tensor, dim)\n\n def test_log_softmax(self):\n \"\"\"Test log_softmax\"\"\"\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, is_float=True)\n\n # Check dim 0 if tensor is 0-dimensional\n dims = 1 if tensor.dim() == 0 else tensor.dim()\n for dim in range(dims):\n self._check_forward_backward(\"log_softmax\", tensor, dim)\n\n def test_transpose(self):\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, is_float=True)\n\n if tensor.dim() == 2: # t() asserts dim == 2\n self._check_forward_backward(\"t\", tensor)\n\n for dim0 in range(tensor.dim()):\n for dim1 in range(tensor.dim()):\n self._check_forward_backward(\"transpose\", tensor, dim0, dim1)\n\n def test_permute(self):\n for ndims in range(5):\n size = tuple([3] * ndims)\n tensor = get_random_test_tensor(size=size, is_float=True)\n\n for perm in itertools.permutations(list(range(ndims))):\n self._check_forward_backward(\"permute\", tensor, perm)\n\n def test_conv1d_smaller_signal_one_channel(self):\n self._conv1d(5, 1)\n\n def test_conv1d_smaller_signal_many_channels(self):\n self._conv1d(5, 5)\n\n def test_conv1d_larger_signal_one_channel(self):\n self._conv1d(16, 1)\n\n def test_conv1d_larger_signal_many_channels(self):\n self._conv1d(16, 5)\n\n def _conv1d(self, signal_size, in_channels):\n \"\"\"Test convolution of encrypted tensor with public/private tensors.\"\"\"\n nbatches = [1, 3]\n nout_channels = [1, 5]\n kernel_sizes = [1, 2, 3]\n paddings = [0, 1]\n strides = [1, 2]\n dilations = [1, 2]\n groupings = [1, 2]\n\n for (\n batches,\n kernel_size,\n out_channels,\n padding,\n stride,\n dilation,\n groups,\n ) in itertools.product(\n nbatches,\n kernel_sizes,\n nout_channels,\n paddings,\n strides,\n dilations,\n groupings,\n ):\n # TODO: Fix conv1d gradient in this case:\n if in_channels > 1 and groups > 1:\n continue\n\n size = (batches, in_channels * groups, signal_size)\n signal = get_random_test_tensor(size=size, 
is_float=True)\n\n kernel_size = (out_channels * groups, in_channels, kernel_size)\n kernel = get_random_test_tensor(size=kernel_size, is_float=True)\n\n self._check_forward_backward(\n \"conv1d\",\n signal,\n kernel,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n )\n\n def test_conv2d_square_image_one_channel(self):\n self._conv2d((5, 5), 1)\n\n def test_conv2d_square_image_many_channels(self):\n self._conv2d((5, 5), 5)\n\n def test_conv2d_rectangular_image_one_channel(self):\n self._conv2d((16, 7), 1)\n\n def test_conv2d_rectangular_image_many_channels(self):\n self._conv2d((16, 7), 5)\n\n def _conv2d(self, image_size, in_channels):\n \"\"\"Test convolution of encrypted tensor with public/private tensors.\"\"\"\n nbatches = [1, 3]\n kernel_sizes = [(1, 1), (2, 2), (2, 3)]\n ochannels = [1, 3]\n paddings = [0, 1, (0, 1)]\n strides = [1, 2, (1, 2)]\n dilations = [1, 2, (1, 2)]\n groupings = [1, 2]\n\n for (\n batches,\n kernel_size,\n out_channels,\n padding,\n stride,\n dilation,\n groups,\n ) in itertools.product(\n nbatches, kernel_sizes, ochannels, paddings, strides, dilations, groupings\n ):\n # TODO: Fix conv2d gradient in this case:\n if in_channels > 1 and groups > 1:\n continue\n\n size = (batches, in_channels * groups, *image_size)\n image = get_random_test_tensor(size=size, is_float=True)\n\n kernel_size = (out_channels * groups, in_channels, *kernel_size)\n kernel = get_random_test_tensor(size=kernel_size, is_float=True)\n\n self._check_forward_backward(\n \"conv2d\",\n image,\n kernel,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n )\n\n def test_max_pool2d(self):\n \"\"\"Tests max pooling gradient\"\"\"\n self._check_pooling(\"max_pool2d\")\n\n def test_avg_pool2d(self):\n \"\"\"Tests average pooling gradient\"\"\"\n self._check_pooling(\"avg_pool2d\")\n\n def _check_pooling(self, func):\n \"\"\"Helper for testing pooling gradients to avoid test timeouts\"\"\"\n image_sizes = [(5, 5), (6, 7)]\n nchannels = [1, 3]\n nbatches = [1, 3]\n\n kernel_sizes = [1, 2, (2, 3)]\n paddings = [1, (0, 0)]\n strides = [1, (2, 2)]\n dilations = [1, 2]\n\n ceil_modes = [False, True] if func == \"max_pool2d\" else [False]\n\n for image_size, channels, batches, kernel_size in itertools.product(\n image_sizes, nchannels, nbatches, kernel_sizes\n ):\n size = (batches, channels, *image_size)\n image = get_random_test_tensor(size=size, is_float=True)\n\n for padding, stride, ceil_mode in itertools.product(\n paddings, strides, ceil_modes\n ):\n # Skip invalid padding sizes\n if kernel_size == 1 and padding == 1:\n continue\n if func == \"max_pool2d\":\n for dilation in dilations:\n self._check_max_pool2d_forward_backward(\n image, kernel_size, padding, stride, dilation, ceil_mode\n )\n else:\n self._check_forward_backward(\n func, image, kernel_size, padding=padding, stride=stride\n )\n\n def _check_max_pool2d_forward_backward(\n self, image, kernel_size, padding, stride, dilation, ceil_mode, tol=0.1\n ):\n \"\"\"Checks forward and backward are for max pool 2d.\n Verifies gradients by checking sum of non-matching elements to account for\n differences in tie resolution in max between PyTorch and CrypTen:\n PyTorch returns smallest index for max entries,\n whereas CrypTen returns a random index.\n\n Args:\n image (torch.tensor): input\n kernel_size (tuple of ints): size of the window over which to compute max\n padding (int or tuple of ints): implicit zero padding to added on both sides\n stride (int or tuple of ints): the stride of the 
window\n ceil_mode (bool): determines whether output size is rounded down or up\n \"\"\"\n # check forward\n image = image.clone()\n image.requires_grad = True\n image_enc = crypten.cryptensor(image, requires_grad=True)\n\n out = torch.nn.functional.max_pool2d(\n image,\n kernel_size,\n padding=padding,\n stride=stride,\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n out_enc = image_enc.max_pool2d(\n kernel_size,\n padding=padding,\n stride=stride,\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n if out.isinf().any():\n # PyTorch can produce improperly sized outputs with Inf values using ceil_mode in some cases\n if ceil_mode:\n return\n self.assertTrue(\n out.size() == out_enc.size(), \"max_pool2d forward incorrect\"\n )\n return # backward will break if output is -inf\n else:\n self._check(out_enc, out, \"max_pool2d forward incorrect\")\n\n # check backward\n grad_output = get_random_test_tensor(size=out.size(), is_float=True)\n grad_output_enc = crypten.cryptensor(grad_output)\n out.backward(grad_output)\n out_enc.backward(grad_output_enc)\n\n # check sum of non-matching gradient entries\n crypten_grad = image_enc.grad.get_plain_text()\n non_matching_indices = (image.grad - crypten_grad).abs() > tol\n sum_is_close = (\n crypten_grad[non_matching_indices].sum()\n - image.grad[non_matching_indices].sum()\n ) < tol\n if not sum_is_close:\n msg = \"max_pool2d backward failed\"\n logging.info(msg)\n logging.info(f\"Result: crypten image gradient {crypten_grad}\")\n logging.info(f\"Result - Reference {image.grad - crypten_grad}\")\n self.assertTrue(sum_is_close, msg=msg)\n\n def test_square(self):\n \"\"\"Tests square function gradient.\n Note: torch pow(2) is used to verify gradient,\n since PyTorch does not implement square().\n \"\"\"\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, is_float=True)\n tensor.requires_grad = True\n tensor_encr = crypten.cryptensor(tensor, requires_grad=True)\n\n out = tensor.pow(2)\n out_encr = tensor_encr.square()\n self._check(out_encr, out, f\"square forward failed with size {size}\")\n\n grad_output = get_random_test_tensor(size=out.shape, is_float=True)\n out.backward(grad_output)\n out_encr.backward(crypten.cryptensor(grad_output))\n self._check(\n tensor_encr.grad,\n tensor.grad,\n f\"square backward failed with size {size}\",\n )\n\n def test_pow(self):\n self._pow_helper(\"pow\")\n\n def test_magic_pow(self):\n self._pow_helper(\"__pow__\")\n\n def _pow_helper(self, pow_fn):\n for size in SIZES:\n tensor = get_random_test_tensor(size=size, min_value=0.5, is_float=True)\n for power in [-3, -2, -1, 0, 1, 2, 3]:\n self._check_forward_backward(pow_fn, tensor, power)\n self._check_forward_backward(pow_fn, tensor, float(power))\n\n def test_norm(self):\n \"\"\"Tests p-norm\"\"\"\n self.default_tolerance *= 2 # Increase tolerance for norm test\n for p in [1, 1.5, 2, 3, float(\"inf\"), \"fro\"]:\n tensor = get_random_test_tensor(max_value=2, size=(3, 3, 3), is_float=True)\n\n self._check_forward_backward(\"norm\", tensor, p=p)\n for dim in [0, 1, 2]:\n self._check_forward_backward(\"norm\", tensor, p=p, dim=dim)\n\n def test_pad(self):\n \"\"\"Tests padding\"\"\"\n sizes = [(1,), (5,), (1, 1), (5, 5), (5, 5, 5), (5, 3, 32, 32)]\n pads = [\n # (0, 0, 0, 0), NOTE: Pytorch backward fails when padding is all 0s\n (1, 0, 0, 0),\n (0, 1, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n (1, 1, 1, 1),\n (2, 2, 1, 1),\n (2, 2, 2, 2),\n ]\n\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n for pad in pads:\n if tensor.dim() 
< 2:\n pad = pad[:2]\n\n # NOTE: Pytorch backward fails when padding is all 0s\n if pad[0] == 0 and pad[1] == 0:\n continue\n\n for value in [0, 1, 10]:\n self._check_forward_backward(\"pad\", tensor, pad, value=value)\n\n def test_clone(self):\n \"\"\"Tests shallow_copy and clone of encrypted tensors.\"\"\"\n sizes = [(5,), (1, 5), (5, 10, 15)]\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n self._check_forward_backward(\"clone\", tensor)\n\n def test_cat_stack(self):\n for module in [crypten, torch]: # torch.cat on CrypTensor runs crypten.cat\n for func in [\"cat\", \"stack\"]:\n for dimensions in range(1, 5):\n size = [5] * dimensions\n for num_tensors in range(1, 5):\n for dim in range(dimensions):\n tensors = [\n get_random_test_tensor(size=size, is_float=True)\n for _ in range(num_tensors)\n ]\n encrypted_tensors = [\n crypten.cryptensor(t, requires_grad=True)\n for t in tensors\n ]\n for i in range(len(tensors)):\n tensors[i].grad = None\n tensors[i].requires_grad = True\n encrypted_tensors[i].grad = None\n encrypted_tensors[i].requires_grad = True\n\n # Forward\n reference = getattr(torch, func)(tensors, dim=dim)\n encrypted_out = getattr(module, func)(\n encrypted_tensors, dim=dim\n )\n self._check(\n encrypted_out, reference, f\"{func} forward failed\"\n )\n\n # Backward\n grad_output = get_random_test_tensor(\n size=reference.size(), is_float=True\n )\n encrypted_grad_output = crypten.cryptensor(grad_output)\n\n reference.backward(grad_output)\n encrypted_out.backward(encrypted_grad_output)\n for i in range(len(tensors)):\n self._check(\n encrypted_tensors[i].grad,\n tensors[i].grad,\n f\"{func} backward failed\",\n )\n\n def test_dropout(self):\n \"\"\"Tests forward for dropout\"\"\"\n # Create a separate test for dropout since it cannot use the\n # regular forward function\n # There's no need to check backwards since PyTorch backwards fails\n all_prob_values = [x * 0.2 for x in range(0, 5)]\n for dropout_fn in [\"dropout\", \"_feature_dropout\"]:\n for prob in all_prob_values:\n for size in [(5, 10), (5, 10, 15), (5, 10, 15, 20)]:\n for use_zeros in [False, True]:\n tensor = get_random_test_tensor(\n size=size, ex_zero=True, min_value=1.0, is_float=True\n )\n if use_zeros:\n # turn the first row to all zeros\n index = [1] + [\n slice(0, tensor.size(i)) for i in range(1, tensor.dim())\n ]\n tensor[index] = 0.0\n\n encr_tensor = crypten.cryptensor(tensor, requires_grad=True)\n encr_tensor_out = getattr(encr_tensor, dropout_fn)(p=prob)\n dropout_tensor = encr_tensor_out.get_plain_text()\n\n # Check the scaling for non-zero elements\n scaled_tensor = tensor / (1 - prob)\n reference = dropout_tensor.where(\n dropout_tensor == 0.0, scaled_tensor\n )\n self._check(\n encr_tensor_out,\n reference,\n \"dropout failed with size {}, use_zeros {}, and \"\n \"probability {}\".format(size, use_zeros, prob),\n )\n\n def test_batchnorm(self):\n \"\"\"\n Tests batchnorm forward and backward steps with training on / off.\n \"\"\"\n tolerance = 0.1\n sizes = [(8, 5), (16, 3), (32, 5), (8, 6, 4), (8, 4, 3, 5)]\n torch.autograd.set_detect_anomaly(True)\n for size in sizes:\n for is_training in (False, True):\n\n # sample input data, weight, and bias:\n tensor = get_random_test_tensor(size=size, is_float=True)\n encrypted_input = crypten.cryptensor(tensor)\n C = size[1]\n weight = get_random_test_tensor(size=[C], max_value=1, is_float=True)\n bias = get_random_test_tensor(size=[C], max_value=1, is_float=True)\n weight.requires_grad = True\n bias.requires_grad = 
True\n\n # dimensions over which means and variances are computed:\n stats_dimensions = list(range(tensor.dim()))\n stats_dimensions.pop(1)\n\n # dummy running mean and variance:\n running_mean = tensor.mean(stats_dimensions).detach()\n running_var = tensor.var(stats_dimensions).detach()\n enc_running_mean = crypten.cryptensor(running_mean)\n enc_running_var = crypten.cryptensor(running_var)\n\n # compute reference output:\n tensor.requires_grad = True\n reference = torch.nn.functional.batch_norm(\n tensor,\n running_mean,\n running_var,\n weight=weight,\n bias=bias,\n training=is_training,\n )\n\n # compute CrypTen output:\n encrypted_input.requires_grad = True\n ctx = AutogradContext()\n batch_norm_fn = crypten.gradients.get_grad_fn(\"batchnorm\")\n with crypten.no_grad():\n encrypted_out = batch_norm_fn.forward(\n ctx,\n encrypted_input,\n weight,\n bias,\n training=is_training,\n running_mean=enc_running_mean,\n running_var=enc_running_var,\n )\n\n # check forward\n self._check(\n encrypted_out,\n reference,\n \"batchnorm forward failed with training \"\n f\"{is_training} on {tensor.dim()}-D\",\n tolerance=tolerance,\n )\n\n # check backward (input, weight, and bias gradients):\n grad_input = get_random_test_tensor(\n size=reference.size(), is_float=True\n )\n reference.backward(grad_input)\n with crypten.no_grad():\n enc_grad_input = crypten.cryptensor(grad_input)\n encrypted_grad = batch_norm_fn.backward(ctx, enc_grad_input)\n TorchGrad = namedtuple(\"TorchGrad\", [\"name\", \"value\"])\n torch_gradients = [\n TorchGrad(\"input gradient\", tensor.grad),\n TorchGrad(\"weight gradient\", weight.grad),\n TorchGrad(\"bias gradient\", bias.grad),\n ]\n for i, torch_gradient in enumerate(torch_gradients):\n self._check(\n encrypted_grad[i],\n torch_gradient.value,\n f\"batchnorm backward {torch_gradient.name} failed \"\n f\"with training {is_training} on {tensor.dim()}-D\",\n tolerance=tolerance,\n )\n\n def test_cross_entropy(self):\n \"\"\"Tests cross_entropy and binary_cross_entropy\"\"\"\n sizes = [(3, 2), (8, 4), (5, 10)]\n losses = [\n \"binary_cross_entropy\",\n \"binary_cross_entropy_with_logits\",\n \"cross_entropy\",\n ]\n\n for size, loss in itertools.product(sizes, losses):\n for skip_forward in [False, True]:\n batch_size, num_targets = size\n if loss in [\"binary_cross_entropy\", \"binary_cross_entropy_with_logits\"]:\n if loss == \"binary_cross_entropy\":\n tensor = get_random_test_tensor(\n size=(batch_size,), max_value=0.998, is_float=True\n )\n tensor = tensor.abs().add_(0.001)\n else:\n tensor = get_random_test_tensor(\n size=(batch_size,), is_float=True\n )\n\n target = get_random_test_tensor(size=(batch_size,), is_float=True)\n target = target.gt(0.0).float()\n target_encr = crypten.cryptensor(target)\n else:\n tensor = get_random_test_tensor(size=size, is_float=True)\n target = get_random_test_tensor(\n size=(batch_size,), max_value=num_targets - 1\n )\n target = onehot(target.abs(), num_targets=num_targets)\n target_encr = crypten.cryptensor(target)\n # CrypTen, unlike PyTorch, uses one-hot targets\n target = target.argmax(1)\n\n # forward\n tensor.requires_grad = True\n tensor_encr = crypten.cryptensor(tensor, requires_grad=True)\n reference = getattr(torch.nn.functional, loss)(tensor, target)\n out_encr = getattr(tensor_encr, loss)(\n target_encr, skip_forward=skip_forward\n )\n if not skip_forward:\n self._check(out_encr, reference, f\"{loss} forward failed\")\n\n # backward\n reference.backward()\n out_encr.backward()\n self._check(\n tensor_encr.grad, 
tensor.grad, f\"{loss} backward failed with\"\n )\n\n def test_view_reshape(self):\n \"\"\"Tests view and reshape gradients\"\"\"\n size_to_views = {\n (10,): [(5, 2), (1, 10)],\n (10, 5): [(50), (2, 5, 5)],\n (5, 10, 8): [(400), (50, 8), (5, 5, 2, 8)],\n }\n\n for size in size_to_views:\n for view in size_to_views[size]:\n tensor = get_random_test_tensor(size=size, is_float=True)\n self._check_forward_backward(\"view\", tensor, view)\n self._check_forward_backward(\"reshape\", tensor, view)\n\n def test_narrow_flatten(self):\n \"\"\"Tests narrow and flatten gradients\"\"\"\n sizes = [(10,), (5, 4), (10, 6, 8)]\n\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n self._check_forward_backward(\"flatten\", tensor)\n for dim in range(tensor.dim()):\n self._check_forward_backward(\"narrow\", tensor, dim, 0, 2)\n self._check_forward_backward(\"narrow\", tensor, dim, 1, 3)\n\n def test_flip(self):\n \"\"\"Tests flip gradient\"\"\"\n sizes = [(2, 3, 7, 2), (5, 10, 15)]\n flips = [(0, 2, 1), (0, 1)]\n\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n for flip in flips:\n self._check_forward_backward(\"flip\", tensor, flip)\n\n def test_gather_scatter(self):\n \"\"\"Tests gather and scatter gradients\"\"\"\n sizes = [(2, 2), (3, 5), (3, 5, 10)]\n indices = [[0, 1, 0, 0], [0, 1, 0, 0, 1] * 3, [0, 0, 1] * 50]\n dims = [0, 1]\n funcs = [\"scatter\", \"gather\"]\n\n for dim, func in itertools.product(dims, funcs):\n for size, index in zip(sizes, indices):\n tensor = get_random_test_tensor(size=size, is_float=True)\n index = torch.tensor(index).reshape(tensor.shape)\n\n tensor.requires_grad = True\n tensor_encr = crypten.cryptensor(tensor, requires_grad=True)\n\n if func == \"gather\":\n reference = getattr(tensor, func)(dim, index)\n out_encr = getattr(tensor_encr, func)(dim, index)\n else:\n src = get_random_test_tensor(size=index.shape, is_float=True)\n reference = getattr(tensor, func)(dim, index, src)\n out_encr = getattr(tensor_encr, func)(dim, index, src)\n\n self._check(\n out_encr, reference, f\"{func} forward failed with index {index}\"\n )\n\n grad_out = get_random_test_tensor(size=reference.shape, is_float=True)\n grad_out_encr = crypten.cryptensor(grad_out)\n reference.backward(grad_out)\n out_encr.backward(grad_out_encr)\n\n self._check(\n tensor_encr.grad,\n tensor.grad,\n f\"{func} backward failed with index {index}\",\n )\n\n def test_index_select(self):\n \"\"\"Tests index_select gradients\"\"\"\n sizes = [(2, 2), (3, 5), (3, 5, 10), (4, 8, 2, 5)]\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n for dim in range(len(size)):\n for index_size in range(size[dim]):\n index = get_random_test_tensor(\n max_value=(size[dim] - 1),\n min_value=0,\n size=(index_size,),\n is_float=False,\n )\n self._check_forward_backward(\"index_select\", tensor, dim, index)\n\n def test_take(self):\n \"\"\"Tests take gradients\"\"\"\n sizes = [(10,), (5, 10), (2, 5, 10)]\n indices = [[0], [0, 5], [0, 2, 5, 8]]\n\n for size, index in itertools.product(sizes, indices):\n tensor = get_random_test_tensor(size=size, is_float=True)\n index = torch.tensor(index)\n self._check_forward_backward(\"take\", tensor, index)\n\n def test_roll(self):\n \"\"\"Tests roll gradients\"\"\"\n sizes = [(1, 10), (5, 10), (2, 5, 10)]\n shifts = [1, 3, (1, 2)]\n dims = [0, 1, (0, 1)]\n\n for size, shift_dim in itertools.product(sizes, zip(shifts, dims)):\n shift, dim = shift_dim\n tensor = get_random_test_tensor(size=size, is_float=True)\n 
self._check_forward_backward(\"roll\", tensor, shift, dim)\n\n def test_cumsum(self):\n \"\"\"Tests cumsum gradient\"\"\"\n sizes = [(), (10,), (5, 10), (2, 5, 10)]\n\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n for dim in range(tensor.dim()):\n self._check_forward_backward(\"cumsum\", tensor, dim)\n\n def test_trace(self):\n \"\"\"Tests trace gradient\"\"\"\n sizes = [(1, 1), (3, 3), (10, 10)]\n\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n self._check_forward_backward(\"trace\", tensor)\n\n def test_var(self):\n \"\"\"Tests var gradient\"\"\"\n sizes = [(10,), (1, 10), (5, 10), (2, 5, 10)]\n import crypten\n\n for size in sizes:\n tensor = get_random_test_tensor(size=size, is_float=True)\n self._check_forward_backward(\"var\", tensor)\n for unbiased in [False, True]:\n self._check_forward_backward(\"var\", tensor, unbiased=unbiased)\n for dim, keepdim in itertools.product(range(len(size)), [False, True]):\n # skip dimensions with 1 element\n if size[dim] == 1:\n continue\n self._check_forward_backward(\n \"var\", tensor, dim, unbiased=unbiased, keepdim=keepdim\n )\n\n def test_getitem(self):\n \"\"\"Tests getitem gradient\"\"\"\n sizes = [(10,), (10, 1), (5, 10), (5, 2, 10)]\n indices = [0, 1, 3]\n\n for size, index in itertools.product(sizes, indices):\n tensor = get_random_test_tensor(size=size, is_float=True)\n self._check_forward_backward(\"__getitem__\", tensor, index)\n\n def test_pos_pow(self):\n \"\"\"Test gradient crypten pos_pow\"\"\"\n for power in [3, -2, 1.75]:\n # ensure base is positive for pos_pow\n tensor = get_random_test_tensor(is_float=True, max_value=2) + 4\n tensor.requires_grad = True\n tensor_encr = crypten.cryptensor(tensor, requires_grad=True)\n\n reference = tensor.pow(power)\n out_encr = tensor_encr.pos_pow(power)\n self._check(\n out_encr, reference, f\"pos_pow forward failed with power {power}\"\n )\n\n grad_out = get_random_test_tensor(is_float=True)\n grad_out_encr = crypten.cryptensor(grad_out)\n reference.backward(grad_out)\n out_encr.backward(grad_out_encr)\n\n self._check(\n tensor_encr.grad,\n tensor.grad,\n f\"pos_pow backward failed with power {power}\",\n )\n\n def test_polynomial(self):\n for terms in range(1, 5):\n for encrypt_coeffs in [False, True]:\n tensor = get_random_test_tensor(is_float=True)\n tensor.requires_grad = True\n tensor_encr = crypten.cryptensor(tensor, requires_grad=True)\n\n coeffs_size = (terms,)\n coeffs = get_random_test_tensor(size=coeffs_size, is_float=True)\n\n reference = (\n tensor.unsqueeze(0)\n .pow(torch.arange(terms).add(1).view([terms] + [1] * terms))\n .mul(coeffs.view([terms] + [1] * terms))\n .sum(0)\n .view(tensor.size())\n )\n if encrypt_coeffs:\n coeffs = crypten.cryptensor(coeffs)\n out_encr = tensor_encr.polynomial(coeffs)\n self._check(out_encr, reference, \"polynomial forward failed\")\n\n grad_out = get_random_test_tensor(size=reference.size(), is_float=True)\n grad_out_encr = crypten.cryptensor(grad_out)\n reference.backward(grad_out)\n out_encr.backward(grad_out_encr)\n self._check(\n tensor_encr.grad,\n tensor.grad,\n \"polynomial backward failed\",\n )\n\n\n# Run all unit tests with both TFP and TTP providers\nclass TestTFP(MultiProcessTestCase, TestGradients):\n def setUp(self):\n self._original_provider = crypten.mpc.get_default_provider()\n crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedFirstParty)\n super(TestTFP, self).setUp()\n\n def tearDown(self):\n 
crypten.mpc.set_default_provider(self._original_provider)\n super(TestTFP, self).tearDown()\n\n\nclass TestTTP(MultiProcessTestCase, TestGradients):\n def setUp(self):\n self._original_provider = crypten.mpc.get_default_provider()\n crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedThirdParty)\n super(TestTTP, self).setUp()\n\n def tearDown(self):\n crypten.mpc.set_default_provider(self._original_provider)\n super(TestTTP, self).tearDown()\n\n\nclass TestPTT(unittest.TestCase, TestGradients):\n def setUp(self):\n self.default_tolerance = 0.5\n self._original_backend = crypten.get_default_cryptensor_type()\n crypten.set_default_cryptensor_type(\"ptt\")\n super(TestPTT, self).setUp()\n crypten.init()\n\n def tearDown(self):\n crypten.set_default_cryptensor_type(self._original_backend)\n super(TestPTT, self).setUp()\n\n\n# This code only runs when executing the file outside the test harness\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.nn.functional.batch_norm", "torch.autograd.set_detect_anomaly", "torch.tensor", "torch.arange", "torch.nn.functional.max_pool2d" ] ]
brightway-lca/brightway2-calc-copy
[ "770cb83954499199888bb7422442cbdc7ef553b3" ]
[ "tests/fixtures/create_fixtures.py" ]
[ "from bw_processing import create_datapackage, INDICES_DTYPE, UNCERTAINTY_DTYPE\nfrom fs.zipfs import ZipFS\nfrom fs.osfs import OSFS\nfrom pathlib import Path\nimport json\nimport numpy as np\n\n\nfixture_dir = Path(__file__).resolve().parent\n\n\ndef bw2io_example_database():\n try:\n import bw2data as bd\n import bw2io as bi\n from bw2data.backends.schema import ActivityDataset as AD\n\n if \"__fixture_creation__\" in bd.projects:\n bd.projects.delete_project(\"__fixture_creation__\", delete_dir=True)\n\n bd.projects.set_current(\"__fixture_creation__\")\n bi.add_example_database()\n db = bd.Database(\"Mobility example\")\n method = bd.Method((\"IPCC\", \"simple\"))\n\n db.filepath_processed().rename(fixture_dir / \"bw2io_example_db.zip\")\n method.filepath_processed().rename(fixture_dir / \"ipcc_simple.zip\")\n with open(fixture_dir / \"bw2io_example_db_mapping.json\", \"w\") as f:\n json.dump([(obj.name, obj.id) for obj in AD.select()], f)\n\n bd.projects.delete_project(delete_dir=True)\n except ImportError:\n print(\"Can't import libraries for bw2io example database fixture creation\")\n\n\ndef empty_biosphere():\n # Flow 1: The flow\n # Activity 1: The activity\n\n dp = create_datapackage(\n fs=ZipFS(str(fixture_dir / \"empty_biosphere.zip\"), write=True),\n )\n\n data_array = np.array([1, 2, 3])\n indices_array = np.array([(2, 1), (1, 1), (2, 2)], dtype=INDICES_DTYPE)\n flip_array = np.array([1, 0, 0], dtype=bool)\n dp.add_persistent_vector(\n matrix=\"technosphere_matrix\",\n data_array=data_array,\n name=\"eb-technosphere\",\n indices_array=indices_array,\n nrows=3,\n flip_array=flip_array,\n )\n\n data_array = np.array([1])\n indices_array = np.array([(1, 0)], dtype=INDICES_DTYPE)\n dp.add_persistent_vector(\n matrix=\"characterization_matrix\",\n data_array=data_array,\n name=\"eb-characterization\",\n indices_array=indices_array,\n global_index=0,\n nrows=1,\n )\n\n dp.finalize_serialization()\n\n\ndef _create_basic_fixture(fs):\n # Activities: 101, 102\n # Products: 1, 2\n # Biosphere flows: 1\n dp = create_datapackage(fs=fs)\n\n data_array = np.array([1, 1, 0.5])\n indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=INDICES_DTYPE)\n flip_array = np.array([0, 0, 1], dtype=bool)\n dp.add_persistent_vector(\n matrix=\"technosphere_matrix\",\n data_array=data_array,\n name=\"technosphere\",\n indices_array=indices_array,\n flip_array=flip_array,\n )\n\n data_array = np.array([1])\n indices_array = np.array([(1, 101)], dtype=INDICES_DTYPE)\n dp.add_persistent_vector(\n matrix=\"biosphere_matrix\",\n data_array=data_array,\n name=\"biosphere\",\n indices_array=indices_array,\n )\n\n data_array = np.array([1])\n indices_array = np.array([(1, 0)], dtype=INDICES_DTYPE)\n dp.add_persistent_vector(\n matrix=\"characterization_matrix\",\n data_array=data_array,\n name=\"eb-characterization\",\n indices_array=indices_array,\n global_index=0,\n nrows=1,\n )\n\n dp.finalize_serialization()\n\n\ndef create_basic_fixture_zipfile():\n _create_basic_fixture(ZipFS(str(fixture_dir / \"basic_fixture.zip\"), write=True))\n\n\ndef create_basic_fixture_directory():\n _create_basic_fixture(OSFS(str(fixture_dir / \"basic_fixture\"), create=True))\n\n\ndef create_svdm_fixtures():\n dp = create_datapackage(\n fs=ZipFS(str(fixture_dir / \"svdm.zip\"), write=True), sequential=True\n )\n\n data_array = np.array([42])\n indices_array = np.array([(1, 1)], dtype=INDICES_DTYPE)\n distributions_array = np.array(\n [\n (4, 0.5, np.NaN, np.NaN, 0.2, 0.8, False),\n ],\n dtype=UNCERTAINTY_DTYPE,\n )\n 
dp.add_persistent_vector(\n matrix=\"weighting_matrix\",\n data_array=data_array,\n name=\"weighting\",\n indices_array=indices_array,\n distributions_array=distributions_array,\n )\n\n data_array = np.array([1, 2, 3, 4, 5]).reshape((1, 5))\n indices_array = np.array([(1, 1)], dtype=INDICES_DTYPE)\n dp.add_persistent_array(\n matrix=\"weighting_matrix\",\n data_array=data_array,\n name=\"weighting2\",\n indices_array=indices_array,\n )\n\n dp.finalize_serialization()\n\n dp2 = create_datapackage(fs=ZipFS(str(fixture_dir / \"svdm2.zip\"), write=True))\n\n data_array = np.array([88])\n indices_array = np.array([(2, 2)], dtype=INDICES_DTYPE)\n dp2.add_persistent_vector(\n matrix=\"weighting_matrix\",\n data_array=data_array,\n name=\"weighting3\",\n indices_array=indices_array,\n )\n dp2.finalize_serialization()\n\n\ndef create_array_fixtures():\n # Activities: 101, 102\n # Products: 1, 2\n # Biosphere flows: 1\n dp = create_datapackage(\n fs=ZipFS(str(fixture_dir / \"array_sequential.zip\"), write=True), sequential=True\n )\n\n data_array = np.array([1, 1, 0.5])\n indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=INDICES_DTYPE)\n flip_array = np.array([0, 0, 1], dtype=bool)\n dp.add_persistent_vector(\n matrix=\"technosphere_matrix\",\n data_array=data_array,\n name=\"technosphere\",\n indices_array=indices_array,\n flip_array=flip_array,\n )\n\n data_array = np.array([[1, 2, 3, 4]])\n indices_array = np.array([(1, 101)], dtype=INDICES_DTYPE)\n dp.add_persistent_array(\n matrix=\"biosphere_matrix\",\n data_array=data_array,\n name=\"biosphere\",\n indices_array=indices_array,\n )\n\n data_array = np.array([1])\n indices_array = np.array([(1, 0)], dtype=INDICES_DTYPE)\n dp.add_persistent_vector(\n matrix=\"characterization_matrix\",\n data_array=data_array,\n name=\"eb-characterization\",\n indices_array=indices_array,\n global_index=0,\n nrows=1,\n )\n\n dp.finalize_serialization()\n\n\ndef create_mc_basic():\n # Flow 1: biosphere\n # Flow 2: biosphere\n # Flow 3: activity 1\n # Flow 4: activity 2\n # Activity 1\n # Activity 2\n dp = create_datapackage(\n fs=ZipFS(str(fixture_dir / \"mc_basic.zip\"), write=True),\n )\n\n data_array = np.array([1, 1, 0.5])\n indices_array = np.array([(3, 1), (4, 2), (4, 1)], dtype=INDICES_DTYPE)\n flip_array = np.array([0, 0, 1], dtype=bool)\n distributions_array = np.array(\n [\n (0, 1, np.NaN, np.NaN, np.NaN, np.NaN, False),\n (0, 1, np.NaN, np.NaN, np.NaN, np.NaN, False),\n (4, 0.5, np.NaN, np.NaN, 0.2, 0.8, False),\n ],\n dtype=UNCERTAINTY_DTYPE,\n )\n dp.add_persistent_vector(\n matrix=\"technosphere_matrix\",\n data_array=data_array,\n name=\"mc-technosphere\",\n indices_array=indices_array,\n distributions_array=distributions_array,\n nrows=3,\n flip_array=flip_array,\n )\n\n data_array = np.array([1, 0.1])\n indices_array = np.array([(1, 1), (2, 2)], dtype=INDICES_DTYPE)\n distributions_array = np.array(\n [\n (4, 1, np.NaN, np.NaN, 0.5, 1.5, False),\n (4, 0.1, np.NaN, np.NaN, 0, 0.2, False),\n ],\n dtype=UNCERTAINTY_DTYPE,\n )\n dp.add_persistent_vector(\n matrix=\"biosphere_matrix\",\n data_array=data_array,\n name=\"mc-biosphere\",\n indices_array=indices_array,\n distributions_array=distributions_array,\n )\n\n data_array = np.array([1, 2])\n indices_array = np.array([(1, 0), (2, 0)], dtype=INDICES_DTYPE)\n distributions_array = np.array(\n [\n (4, 1, np.NaN, np.NaN, 0.5, 2, False),\n (4, 2, np.NaN, np.NaN, 1, 4, False),\n ],\n dtype=UNCERTAINTY_DTYPE,\n )\n dp.add_persistent_vector(\n matrix=\"characterization_matrix\",\n 
data_array=data_array,\n name=\"mc-characterization\",\n indices_array=indices_array,\n distributions_array=distributions_array,\n global_index=0,\n nrows=3,\n )\n dp.finalize_serialization()\n\n\ndef create_mc_complete():\n # Flow 1: biosphere\n # Flow 2: biosphere\n # Flow 3: activity 1\n # Flow 4: activity 2\n # Activity 1\n # Activity 2\n dp = create_datapackage(\n fs=ZipFS(str(fixture_dir / \"mc_complete.zip\"), write=True),\n )\n\n data_array = np.array([1, 2])\n indices_array = np.array([(100, 0), (200, 0)], dtype=INDICES_DTYPE)\n distributions_array = np.array(\n [\n (4, 100, np.NaN, np.NaN, 50, 200, False),\n (4, 200, np.NaN, np.NaN, 100, 400, False),\n ],\n dtype=UNCERTAINTY_DTYPE,\n )\n dp.add_persistent_vector(\n matrix=\"normalization_matrix\",\n data_array=data_array,\n name=\"mc-normalization\",\n indices_array=indices_array,\n distributions_array=distributions_array,\n )\n\n data_array = np.array([1])\n indices_array = np.array([(0, 0)], dtype=INDICES_DTYPE)\n distributions_array = np.array(\n [\n (4, 1, np.NaN, np.NaN, 0.5, 2, False),\n ],\n dtype=UNCERTAINTY_DTYPE,\n )\n dp.add_persistent_vector(\n matrix=\"weighting_matrix\",\n data_array=data_array,\n name=\"mc-weighting\",\n indices_array=indices_array,\n distributions_array=distributions_array,\n )\n dp.finalize_serialization()\n\n\n# def create_mc_single_activity_only_production():\n# with temporary_project_dir() as td:\n# biosphere = bw2data.Database(\"biosphere\")\n# biosphere.write(\n# {(\"biosphere\", \"1\"): {\"type\": \"emission\"},}\n# )\n# saop = bw2data.Database(\"saop\")\n# saop.write(\n# {\n# (\"saop\", \"1\"): {\n# \"exchanges\": [\n# {\n# \"amount\": 0.5,\n# \"minimum\": 0.2,\n# \"maximum\": 0.8,\n# \"input\": (\"biosphere\", \"1\"),\n# \"type\": \"biosphere\",\n# \"uncertainty type\": 4,\n# },\n# {\n# \"amount\": 1,\n# \"minimum\": 0.5,\n# \"maximum\": 1.5,\n# \"input\": (\"saop\", \"1\"),\n# \"type\": \"production\",\n# \"uncertainty type\": 4,\n# },\n# ],\n# \"type\": \"process\",\n# },\n# }\n# )\n# fixture_dir = this_dir / \"mc_saop\"\n# fixture_dir.mkdir(exist_ok=True)\n# biosphere.filepath_processed().rename(fixture_dir / \"biosphere.zip\")\n# saop.filepath_processed().rename(fixture_dir / \"saop.zip\")\n# with open(fixture_dir / \"mapping.json\", \"w\") as f:\n# json.dump(list(bw2data.mapping.items()), f)\n\n\nif __name__ == \"__main__\":\n # empty_biosphere()\n # bw2io_example_database()\n # create_mc_basic()\n # create_mc_complete()\n create_basic_fixture_zipfile()\n create_basic_fixture_directory()\n create_array_fixtures()\n # create_svdm_fixtures()\n\n\n# create_example_database()\n# create_empty_biosphere()\n# create_mc_single_activity_only_production()\n" ]
[ [ "numpy.array" ] ]
wonbeomjang/kaggle
[ "b9dd12448f5438804412ac0480f06f16ea164549" ]
[ "facial-keypoints-detection/test.py" ]
[ "import os\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nfrom models import Model\nfrom dataset import get_test_loader\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--image_size', type=int, default=224, help='the height / width of the input image to network')\nparser.add_argument('--data_dir', type=str, default='test', help='directory of dataset')\nparser.add_argument('--num_classes', type=int, default=2, help='number of model output channels')\nparser.add_argument('--batch_size', type=int, default=256, help='batch size')\nparser.add_argument('--epoch', type=int, default=68, help='start number of epochs to train for')\nparser.add_argument('--checkpoint_dir', default='checkpoints', help=\"path to saved models (to continue training)\")\n\nargs = parser.parse_args()\n\ndevice = 'cuda:0' if torch.cuda.is_available() else 'cpu'\nnet = Model(args.num_classes)\n\ndata_loader = get_test_loader(args.data_dir, args.image_size, args.batch_size)\n\nnet.load_state_dict(torch.load(os.path.join(f'{args.checkpoint_dir}', f'{args.epoch}.pth'), map_location=device))\nfile = open('output.csv', 'w')\nfile.write('id,label\\n')\n\nfor indexs, images in tqdm(data_loader):\n with torch.no_grad():\n images: torch.Tensor = images.to(device)\n indexs: torch.Tensor = indexs\n preds = net.predict_image(images)\n\n for i in range(len(indexs)):\n file.write(f'{indexs[i]},{preds[i][1].item()}\\n')" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
LinXueyuanStdio/KGE-toolbox
[ "916842835e61ba99dde1409592977a2ec55f8aae", "916842835e61ba99dde1409592977a2ec55f8aae" ]
[ "toolbox/nn/TuckERTNT.py", "toolbox/nn/EchoE.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass TuckERTNT(nn.Module):\n def __init__(self, d, de, dr, dt, device=\"cpu\", input_dropout=0., hidden_dropout1=0., hidden_dropout2=0., **kwargs):\n super(TuckERTNT, self).__init__()\n\n self.device = device\n\n # Embeddings dimensionality\n self.de = de\n self.dr = dr\n self.dt = dt\n\n # Data dimensionality\n self.ne = len(d.entities)\n self.nr = len(d.relations)\n self.nt = len(d.time)\n\n # Embedding matrices\n self.E = nn.Embedding(self.ne, de)\n self.R = nn.Embedding(self.nr, dr)\n self.T = nn.Embedding(self.nt, dt)\n\n # Core tensor\n self.W = nn.Parameter(torch.tensor(np.random.uniform(-0.1, 0.1, (dr, de, dt, de)), dtype=torch.float, device=self.device, requires_grad=True))\n\n # \"Special\" Layers\n self.input_dropout = nn.Dropout(input_dropout)\n self.hidden_dropout1 = nn.Dropout(hidden_dropout1)\n self.hidden_dropout2 = nn.Dropout(hidden_dropout2)\n self.loss = nn.BCELoss()\n\n self.bne = nn.BatchNorm1d(de)\n\n def init(self):\n nn.init.xavier_normal_(self.E.weight.data)\n nn.init.xavier_normal_(self.R.weight.data)\n nn.init.xavier_normal_(self.T.weight.data)\n\n def forward(self, e1_idx, r_idx, t_idx):\n ### Temporal part\n # Mode 1 product with entity vector\n e1 = self.E(e1_idx)\n x = self.bne(e1)\n x = self.input_dropout(x)\n x = x.view(-1, 1, self.de) # (B, 1, de)\n\n # Mode 2 product with relation vector\n r = self.R(r_idx) # (B, dr)\n W_mat = torch.mm(r, self.W.view(r.size(1), -1)) # (B, dr) * (dr, de*de*dt) = (B, de*de*dt)\n W_mat = W_mat.view(-1, self.de, self.de * self.dt) # (B, de, de*dt)\n x = torch.bmm(x, W_mat) # (B, 1, de) * (B, de, de*dt) = (B, 1, de*dt)\n\n # Mode 4 product with entity matrix \n x = x.view(-1, self.de) # (B, de*dt) -> (B*dt, de)\n x = torch.mm(x, self.E.weight.transpose(1, 0)) # (B*dt, de) * (E, de)^T = (B*dt, E)\n\n # Mode 3 product with time vector\n t = self.T(t_idx).view(-1, 1, self.dt) # (B, 1, dt)\n xt = x.view(-1, self.dt, self.ne) # (B, dt, E)\n xt = torch.bmm(t, xt) # (B, 1, dt) * (B, dt, E) -> (B, 1, E)\n xt = xt.view(-1, self.ne) # (B, E)\n\n ### Non temporal part\n # mode 3 product with identity matrix\n x = x.view(-1, self.dt) # (B*E, dt)\n x = torch.mm(x, torch.ones(self.dt).to(self.device).view(self.dt, 1)) # (B*E, dt) * (dt, 1) = (B*E, 1)\n x = x.view(-1, self.ne) # (B, E)\n\n # Sum of the 2 models\n x = x + xt\n\n # Turn results into \"probabilities\"\n pred = torch.sigmoid(x)\n return pred\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.utils import softmax\n\nfrom toolbox.nn.Highway import Highway\n\n\nclass GraphEncoder(nn.Module):\n def __init__(self, entity_dim, relation_dim):\n super(GraphEncoder, self).__init__()\n self.a_i = nn.Linear(entity_dim, 1, bias=False) # h\n self.a_j = nn.Linear(entity_dim, 1, bias=False) # t\n self.a_k = nn.Linear(relation_dim, 1, bias=False) # r\n self.out_dim = entity_dim + entity_dim + relation_dim\n\n def forward(self, h, r, t):\n \"\"\"\n h,r,t:BxNxd\n \"\"\"\n e_i = self.a_i(h).squeeze(dim=-1)\n r_k = self.a_k(r).squeeze(dim=-1)\n e_j = self.a_j(t).squeeze(dim=-1)\n a = e_i + e_j + r_k\n alpha = F.leaky_relu(a).float().softmax(dim=-1)\n\n v = torch.cat([h, r, t], dim=-1)\n # print(alpha.shape, v.shape)\n ans = alpha.unsqueeze(dim=-2)\n # print(ans.shape, v.shape)\n ans = ans.bmm(v).squeeze(dim=-2)\n # print(ans.shape)\n return ans\n\n\nclass BatchGraphEncoder(nn.Module):\n def __init__(self, entity_dim, relation_dim):\n super(BatchGraphEncoder, self).__init__()\n self.a_i = 
nn.Linear(entity_dim, 1, bias=False) # h\n self.a_j = nn.Linear(entity_dim, 1, bias=False) # t\n self.a_k = nn.Linear(relation_dim, 1, bias=False) # r\n self.out_dim = entity_dim + entity_dim + relation_dim\n\n def forward(self, h, r, t):\n \"\"\"\n h:bxd\n r:bxd\n t:bxNxd\n \"\"\"\n h = h.unsqueeze(dim=1)\n r = r.unsqueeze(dim=1)\n e_i = self.a_i(h)\n r_k = self.a_k(r)\n e_j = self.a_j(t)\n a = e_i + e_j + r_k\n alpha = softmax(F.leaky_relu(a).float(), torch.range(start=1, end=a.size(0)).long().to(a.device))\n\n h = torch.cat([h for i in range(t.size(1))], dim=1)\n r = torch.cat([r for i in range(t.size(1))], dim=1)\n v = torch.cat([h, r, t], dim=-1)\n alpha = alpha.transpose(0, 1)\n alpha = alpha.view(alpha.size(0), 1, -1)\n v = v.transpose(0, 1)\n ans = alpha.bmm(v).view(-1, self.out_dim)\n return ans\n\n\nclass Composer(nn.Module):\n def __init__(self, entity_dim, relation_dim):\n super(Composer, self).__init__()\n self.out_dim = entity_dim + entity_dim + relation_dim\n self.highway = Highway(self.out_dim)\n\n def forward(self, h, r, t, g):\n # print(h.shape, r.shape, t.shape, g.shape)\n if len(t.size()) == 3:\n h = h.unsqueeze(dim=1)\n r = r.unsqueeze(dim=1)\n h = torch.cat([h for i in range(t.size(1))], dim=1)\n r = torch.cat([r for i in range(t.size(1))], dim=1)\n # print(h.shape, r.shape, t.shape, g.shape)\n x = torch.cat([h, r, t], dim=-1)\n # print(x.size(), g.size())\n x = self.highway(x, g)\n return x\n\n\nclass ConvE(nn.Module):\n def __init__(self, embedding_dim, img_h=10, input_dropout=0.2, hidden_dropout1=0.2, hidden_dropout2=0.3):\n super(ConvE, self).__init__()\n self.inp_drop = nn.Dropout(input_dropout)\n self.hidden_drop = nn.Dropout(hidden_dropout1)\n self.feature_map_drop = nn.Dropout2d(hidden_dropout2)\n\n self.img_h = img_h\n self.img_w = embedding_dim // self.img_h\n\n self.conv1 = nn.Conv2d(1, 32, (3, 3), 1, 0, bias=True)\n self.bn0 = nn.BatchNorm2d(1)\n self.bn1 = nn.BatchNorm2d(32)\n self.bn2 = nn.BatchNorm1d(embedding_dim)\n\n hidden_size = (self.img_h * 2 - 3 + 1) * (self.img_w - 3 + 1) * 32\n self.fc = nn.Linear(hidden_size, embedding_dim)\n\n def forward(self, h, r):\n h = h.view(-1, 1, self.img_h, self.img_w)\n r = r.view(-1, 1, self.img_h, self.img_w)\n\n x = torch.cat([h, r], 2)\n x = self.bn0(x)\n x = self.inp_drop(x)\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.feature_map_drop(x)\n x = x.view(x.shape[0], -1)\n x = self.fc(x)\n x = self.hidden_drop(x)\n x = self.bn2(x)\n x = F.relu(x)\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, entity_dim, relation_dim, input_dropout=0.2, hidden_dropout1=0.2, hidden_dropout2=0.3):\n super(Decoder, self).__init__()\n self.model = ConvE(entity_dim, 10, input_dropout, hidden_dropout1, hidden_dropout2)\n\n def forward(self, E, R, head, rel, g):\n h = E(head)\n r = R(rel)\n x = self.model(h, r)\n return x\n\n\nclass L1_Loss(nn.Module):\n def __init__(self, gamma=3):\n super(L1_Loss, self).__init__()\n self.gamma = gamma\n\n def dis(self, x, y):\n return torch.sum(torch.abs(x - y), dim=-1)\n\n def forward(self, x1, x2, train_set, train_batch, false_pair):\n x1_train, x2_train = x1[train_set[:, 0]], x2[train_set[:, 1]]\n x1_neg1 = x1[train_batch[0].view(-1)].reshape(-1, train_set.size(0), x1.size(1))\n x1_neg2 = x2[train_batch[1].view(-1)].reshape(-1, train_set.size(0), x2.size(1))\n x2_neg1 = x2[train_batch[2].view(-1)].reshape(-1, train_set.size(0), x2.size(1))\n x2_neg2 = x1[train_batch[3].view(-1)].reshape(-1, train_set.size(0), x1.size(1))\n\n dis_x1_x2 = self.dis(x1_train, 
x2_train)\n loss11 = torch.mean(F.relu(self.gamma + dis_x1_x2 - self.dis(x1_train, x1_neg1)))\n loss12 = torch.mean(F.relu(self.gamma + dis_x1_x2 - self.dis(x1_train, x1_neg2)))\n loss21 = torch.mean(F.relu(self.gamma + dis_x1_x2 - self.dis(x2_train, x2_neg1)))\n loss22 = torch.mean(F.relu(self.gamma + dis_x1_x2 - self.dis(x2_train, x2_neg2)))\n if false_pair is not None:\n x1_test_false, x2_test_false = x1[false_pair[:, 0]], x2[false_pair[:, 1]]\n loss3 = torch.mean(F.relu(self.gamma - self.dis(x1_test_false, x2_test_false)))\n loss = (loss11 + loss12 + loss21 + loss22 + loss3) / 5\n else:\n loss = (loss11 + loss12 + loss21 + loss22) / 4\n return loss\n\n\nclass EchoE(nn.Module):\n def __init__(self, num_entities, num_relations, entity_dim, relation_dim):\n super(EchoE, self).__init__()\n self.emb_e = nn.Embedding(num_entities, entity_dim)\n self.emb_rel = nn.Embedding(num_relations, relation_dim)\n\n self.encoder = GraphEncoder(entity_dim, relation_dim)\n self.decoder = Decoder(entity_dim, relation_dim)\n self.composer = Composer(entity_dim, relation_dim)\n self.proj = nn.Linear(self.composer.out_dim, self.composer.out_dim)\n\n def init(self):\n nn.init.xavier_normal_(self.emb_e.weight.data)\n nn.init.xavier_normal_(self.emb_rel.weight.data)\n\n def forward(self, head0, rel0, tail0, head, rel, tail):\n g, h, r = self.ghr(head0, rel0, tail0, head, rel)\n\n t1 = self.decoder(self.emb_e, self.emb_rel, head, rel, g)\n g1 = self.composer(h, r, t1, g)\n g1 = self.proj(g1)\n\n t2 = self.emb_e(tail)\n g2 = self.composer(h, r, t2, g).detach() # 截断梯度\n\n loss = torch.mean(F.relu(self.dis(g1, g2)))\n return loss\n\n def ghr(self, head0, rel0, tail0, head, rel):\n g = self.encode(head0, rel0, tail0)\n h = self.emb_e(head)\n r = self.emb_rel(rel)\n return g, h, r\n\n def encode(self, head0, rel0, tail0):\n h0 = self.emb_e(head0)\n r0 = self.emb_rel(rel0)\n t0 = self.emb_e(tail0)\n g = self.encoder(h0, r0, t0)\n return g\n\n def dis(self, x, y):\n return torch.sum(torch.abs(x - y), dim=-1)\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.sigmoid", "torch.ones", "torch.nn.init.xavier_normal_", "torch.nn.Embedding", "torch.nn.BCELoss", "torch.bmm", "numpy.random.uniform" ], [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.abs", "torch.nn.Dropout2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.init.xavier_normal_", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.functional.leaky_relu", "torch.nn.BatchNorm2d" ] ]
jolares/ripe-banana-collector
[ "88c921d401697b007ee37f360dc05c4b56a046bf" ]
[ "python/unitytrainers/ppo/trainer.py" ]
[ "# # Unity ML-Agents Toolkit\n# ## ML-Agent Learning (PPO)\n# Contains an implementation of PPO as described (https://arxiv.org/abs/1707.06347).\n\nimport logging\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom unityagents import AllBrainInfo, BrainInfo\nfrom unitytrainers.buffer import Buffer\nfrom unitytrainers.ppo.models import PPOModel\nfrom unitytrainers.trainer import UnityTrainerException, Trainer\n\nlogger = logging.getLogger(\"unityagents\")\n\n\nclass PPOTrainer(Trainer):\n \"\"\"The PPOTrainer is an implementation of the PPO algorithm.\"\"\"\n\n def __init__(self, sess, env, brain_name, trainer_parameters, training, seed):\n \"\"\"\n Responsible for collecting exp_samples and training PPO model.\n :param sess: Tensorflow session.\n :param env: The UnityEnvironment.\n :param trainer_parameters: The parameters for the trainer (dictionary).\n :param training: Whether the trainer is set for training.\n \"\"\"\n self.param_keys = ['batch_size', 'beta', 'buffer_size', 'epsilon', 'gamma', 'hidden_units', 'lambd',\n 'learning_rate', 'max_steps', 'normalize', 'num_epoch', 'num_layers',\n 'time_horizon', 'sequence_length', 'summary_freq', 'use_recurrent',\n 'graph_scope', 'summary_path', 'memory_size', 'use_curiosity', 'curiosity_strength',\n 'curiosity_enc_size']\n\n for k in self.param_keys:\n if k not in trainer_parameters:\n raise UnityTrainerException(\"The hyperparameter {0} could not be found for the PPO trainer of \"\n \"brain {1}.\".format(k, brain_name))\n\n super(PPOTrainer, self).__init__(sess, env, brain_name, trainer_parameters, training)\n\n self.use_recurrent = trainer_parameters[\"use_recurrent\"]\n self.use_curiosity = bool(trainer_parameters['use_curiosity'])\n self.sequence_length = 1\n self.step = 0\n self.has_updated = False\n self.m_size = None\n if self.use_recurrent:\n self.m_size = trainer_parameters[\"memory_size\"]\n self.sequence_length = trainer_parameters[\"sequence_length\"]\n if self.m_size == 0:\n raise UnityTrainerException(\"The memory size for brain {0} is 0 even though the trainer uses recurrent.\"\n .format(brain_name))\n elif self.m_size % 4 != 0:\n raise UnityTrainerException(\"The memory size for brain {0} is {1} but it must be divisible by 4.\"\n .format(brain_name, self.m_size))\n\n self.variable_scope = trainer_parameters['graph_scope']\n with tf.variable_scope(self.variable_scope):\n tf.set_random_seed(seed)\n self.model = PPOModel(env.brains[brain_name],\n lr=float(trainer_parameters['learning_rate']),\n h_size=int(trainer_parameters['hidden_units']),\n epsilon=float(trainer_parameters['epsilon']),\n beta=float(trainer_parameters['beta']),\n max_step=float(trainer_parameters['max_steps']),\n normalize=trainer_parameters['normalize'],\n use_recurrent=trainer_parameters['use_recurrent'],\n num_layers=int(trainer_parameters['num_layers']),\n m_size=self.m_size,\n use_curiosity=bool(trainer_parameters['use_curiosity']),\n curiosity_strength=float(trainer_parameters['curiosity_strength']),\n curiosity_enc_size=float(trainer_parameters['curiosity_enc_size']))\n\n stats = {'cumulative_reward': [], 'episode_length': [], 'value_estimate': [],\n 'entropy': [], 'value_loss': [], 'policy_loss': [], 'learning_rate': []}\n if self.use_curiosity:\n stats['forward_loss'] = []\n stats['inverse_loss'] = []\n stats['intrinsic_reward'] = []\n self.intrinsic_rewards = {}\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.cumulative_rewards = {}\n self.episode_steps = {}\n self.is_continuous_action = 
(env.brains[brain_name].vector_action_space_type == \"continuous\")\n self.is_continuous_observation = (env.brains[brain_name].vector_observation_space_type == \"continuous\")\n self.use_visual_obs = (env.brains[brain_name].number_visual_observations > 0)\n self.use_vector_obs = (env.brains[brain_name].vector_observation_space_size > 0)\n self.summary_path = trainer_parameters['summary_path']\n if not os.path.exists(self.summary_path):\n os.makedirs(self.summary_path)\n\n self.summary_writer = tf.summary.FileWriter(self.summary_path)\n\n self.inference_run_list = [self.model.output, self.model.all_probs, self.model.value,\n self.model.entropy, self.model.learning_rate]\n if self.is_continuous_action:\n self.inference_run_list.append(self.model.output_pre)\n if self.use_recurrent:\n self.inference_run_list.extend([self.model.memory_out])\n if (self.is_training and self.is_continuous_observation and\n self.use_vector_obs and self.trainer_parameters['normalize']):\n self.inference_run_list.extend([self.model.update_mean, self.model.update_variance])\n\n def __str__(self):\n return '''Hyperparameters for the PPO Trainer of brain {0}: \\n{1}'''.format(\n self.brain_name, '\\n'.join(['\\t{0}:\\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys]))\n\n @property\n def parameters(self):\n \"\"\"\n Returns the trainer parameters of the trainer.\n \"\"\"\n return self.trainer_parameters\n\n @property\n def graph_scope(self):\n \"\"\"\n Returns the graph scope of the trainer.\n \"\"\"\n return self.variable_scope\n\n @property\n def get_max_steps(self):\n \"\"\"\n Returns the maximum number of steps. Is used to know when the trainer should be stopped.\n :return: The maximum number of steps of the trainer\n \"\"\"\n return float(self.trainer_parameters['max_steps'])\n\n @property\n def get_step(self):\n \"\"\"\n Returns the number of steps the trainer has performed\n :return: the step count of the trainer\n \"\"\"\n return self.step\n\n @property\n def get_last_reward(self):\n \"\"\"\n Returns the last reward the trainer has had\n :return: the new last reward\n \"\"\"\n return self.sess.run(self.model.last_reward)\n\n def increment_step_and_update_last_reward(self):\n \"\"\"\n Increment the step count of the trainer and Updates the last reward\n \"\"\"\n if len(self.stats['cumulative_reward']) > 0:\n mean_reward = np.mean(self.stats['cumulative_reward'])\n self.sess.run([self.model.update_reward,\n self.model.increment_step],\n feed_dict={self.model.new_reward: mean_reward})\n else:\n self.sess.run(self.model.increment_step)\n self.step = self.sess.run(self.model.global_step)\n\n def take_action(self, all_brain_info: AllBrainInfo):\n \"\"\"\n Decides actions given observations information, and takes them in environment.\n :param all_brain_info: A dictionary of brain names and BrainInfo from environment.\n :return: a tuple containing action, memories, values and an object\n to be passed to add exp_samples\n \"\"\"\n curr_brain_info = all_brain_info[self.brain_name]\n if len(curr_brain_info.agents) == 0:\n return [], [], [], None\n\n feed_dict = {self.model.batch_size: len(curr_brain_info.vector_observations),\n self.model.sequence_length: 1}\n if self.use_recurrent:\n if not self.is_continuous_action:\n feed_dict[self.model.prev_action] = curr_brain_info.previous_vector_actions.flatten()\n if curr_brain_info.memories.shape[1] == 0:\n curr_brain_info.memories = np.zeros((len(curr_brain_info.agents), self.m_size))\n feed_dict[self.model.memory_in] = curr_brain_info.memories\n if 
self.use_visual_obs:\n for i, _ in enumerate(curr_brain_info.visual_observations):\n feed_dict[self.model.visual_in[i]] = curr_brain_info.visual_observations[i]\n if self.use_vector_obs:\n feed_dict[self.model.vector_in] = curr_brain_info.vector_observations\n\n values = self.sess.run(self.inference_run_list, feed_dict=feed_dict)\n run_out = dict(zip(self.inference_run_list, values))\n\n self.stats['value_estimate'].append(run_out[self.model.value].mean())\n self.stats['entropy'].append(run_out[self.model.entropy].mean())\n self.stats['learning_rate'].append(run_out[self.model.learning_rate])\n if self.use_recurrent:\n return run_out[self.model.output], run_out[self.model.memory_out], None, run_out\n else:\n return run_out[self.model.output], None, None, run_out\n\n def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:\n \"\"\"\n Constructs a BrainInfo which contains the most recent previous exp_samples for all agents info\n which correspond to the agents in a provided next_info.\n :BrainInfo next_info: A t+1 BrainInfo.\n :return: curr_info: Reconstructed BrainInfo to match agents of next_info.\n \"\"\"\n visual_observations = [[]]\n vector_observations = []\n text_observations = []\n memories = []\n rewards = []\n local_dones = []\n max_reacheds = []\n agents = []\n prev_vector_actions = []\n prev_text_actions = []\n for agent_id in next_info.agents:\n agent_brain_info = self.training_buffer[agent_id].last_brain_info\n agent_index = agent_brain_info.agents.index(agent_id)\n if agent_brain_info is None:\n agent_brain_info = next_info\n for i in range(len(next_info.visual_observations)):\n visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])\n vector_observations.append(agent_brain_info.vector_observations[agent_index])\n text_observations.append(agent_brain_info.text_observations[agent_index])\n if self.use_recurrent:\n memories.append(agent_brain_info.memories[agent_index])\n rewards.append(agent_brain_info.rewards[agent_index])\n local_dones.append(agent_brain_info.local_done[agent_index])\n max_reacheds.append(agent_brain_info.max_reached[agent_index])\n agents.append(agent_brain_info.agents[agent_index])\n prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])\n prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])\n curr_info = BrainInfo(visual_observations, vector_observations, text_observations, memories, rewards,\n agents, local_dones, prev_vector_actions, prev_text_actions, max_reacheds)\n return curr_info\n\n def generate_intrinsic_rewards(self, curr_info, next_info):\n \"\"\"\n Generates intrinsic reward used for Curiosity-based training.\n :BrainInfo curr_info: Current BrainInfo.\n :BrainInfo next_info: Next BrainInfo.\n :return: Intrinsic rewards for all agents.\n \"\"\"\n if self.use_curiosity:\n feed_dict = {self.model.batch_size: len(next_info.vector_observations), self.model.sequence_length: 1}\n if self.is_continuous_action:\n feed_dict[self.model.output] = next_info.previous_vector_actions\n else:\n feed_dict[self.model.action_holder] = next_info.previous_vector_actions.flatten()\n\n if curr_info.agents != next_info.agents:\n curr_info = self.construct_curr_info(next_info)\n\n if self.use_visual_obs:\n for i in range(len(curr_info.visual_observations)):\n feed_dict[self.model.visual_in[i]] = curr_info.visual_observations[i]\n feed_dict[self.model.next_visual_in[i]] = next_info.visual_observations[i]\n if self.use_vector_obs:\n feed_dict[self.model.vector_in] = 
curr_info.vector_observations\n feed_dict[self.model.next_vector_in] = next_info.vector_observations\n if self.use_recurrent:\n if curr_info.memories.shape[1] == 0:\n curr_info.memories = np.zeros((len(curr_info.agents), self.m_size))\n feed_dict[self.model.memory_in] = curr_info.memories\n intrinsic_rewards = self.sess.run(self.model.intrinsic_reward,\n feed_dict=feed_dict) * float(self.has_updated)\n return intrinsic_rewards\n else:\n return None\n\n def generate_value_estimate(self, brain_info, idx):\n \"\"\"\n Generates value estimates for bootstrapping.\n :param brain_info: BrainInfo to be used for bootstrapping.\n :param idx: Index in BrainInfo of agent.\n :return: Value estimate.\n \"\"\"\n feed_dict = {self.model.batch_size: 1, self.model.sequence_length: 1}\n if self.use_visual_obs:\n for i in range(len(brain_info.visual_observations)):\n feed_dict[self.model.visual_in[i]] = [brain_info.visual_observations[i][idx]]\n if self.use_vector_obs:\n feed_dict[self.model.vector_in] = [brain_info.vector_observations[idx]]\n if self.use_recurrent:\n if brain_info.memories.shape[1] == 0:\n brain_info.memories = np.zeros(\n (len(brain_info.vector_observations), self.m_size))\n feed_dict[self.model.memory_in] = [brain_info.memories[idx]]\n if not self.is_continuous_action and self.use_recurrent:\n feed_dict[self.model.prev_action] = brain_info.previous_vector_actions[idx].flatten()\n value_estimate = self.sess.run(self.model.value, feed_dict)\n return value_estimate\n\n def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):\n \"\"\"\n Adds exp_samples to each agent's experience history.\n :param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.\n :param next_all_info: Dictionary of all current brains and corresponding BrainInfo.\n :param take_action_outputs: The outputs of the take action method.\n \"\"\"\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs\n\n intrinsic_rewards = self.generate_intrinsic_rewards(curr_info, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n if self.use_visual_obs:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id]['visual_obs%d' % i].append(\n stored_info.visual_observations[i][idx])\n self.training_buffer[agent_id]['next_visual_obs%d' % i].append(\n next_info.visual_observations[i][idx])\n if self.use_vector_obs:\n self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])\n self.training_buffer[agent_id]['next_vector_in'].append(\n next_info.vector_observations[next_idx])\n if self.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros((len(stored_info.agents), self.m_size))\n self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])\n actions = stored_take_action_outputs[self.model.output]\n if self.is_continuous_action:\n actions_pre = stored_take_action_outputs[self.model.output_pre]\n 
self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])\n a_dist = stored_take_action_outputs[self.model.all_probs]\n value = stored_take_action_outputs[self.model.value]\n self.training_buffer[agent_id]['actions'].append(actions[idx])\n self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])\n self.training_buffer[agent_id]['masks'].append(1.0)\n if self.use_curiosity:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +\n intrinsic_rewards[next_idx])\n else:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])\n self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])\n self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])\n\n if agent_id not in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]\n if self.use_curiosity:\n if agent_id not in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1\n\n def process_experiences(self, current_info: AllBrainInfo, new_info: AllBrainInfo):\n \"\"\"\n Checks agent histories for processing condition, and processes them as necessary.\n Processing involves calculating value and advantage targets for model updating step.\n :param current_info: Dictionary of all current brains and corresponding BrainInfo.\n :param new_info: Dictionary of all next brains and corresponding BrainInfo.\n \"\"\"\n\n info = new_info[self.brain_name]\n for l in range(len(info.agents)):\n agent_actions = self.training_buffer[info.agents[l]]['actions']\n if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])\n and len(agent_actions) > 0):\n agent_id = info.agents[l]\n if info.local_done[l] and not info.max_reached[l]:\n value_next = 0.0\n else:\n if info.max_reached[l]:\n bootstrapping_info = self.training_buffer[agent_id].last_brain_info\n idx = bootstrapping_info.agents.index(agent_id)\n else:\n bootstrapping_info = info\n idx = l\n value_next = self.generate_value_estimate(bootstrapping_info, idx)\n\n self.training_buffer[agent_id]['advantages'].set(\n get_gae(\n rewards=self.training_buffer[agent_id]['rewards'].get_batch(),\n value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),\n value_next=value_next,\n gamma=self.trainer_parameters['gamma'],\n lambd=self.trainer_parameters['lambd']))\n self.training_buffer[agent_id]['discounted_returns'].set(\n self.training_buffer[agent_id]['advantages'].get_batch()\n + self.training_buffer[agent_id]['value_estimates'].get_batch())\n\n self.training_buffer.append_update_buffer(agent_id, batch_size=None,\n training_length=self.sequence_length)\n\n self.training_buffer[agent_id].reset_agent()\n if info.local_done[l]:\n self.stats['cumulative_reward'].append(\n self.cumulative_rewards.get(agent_id, 0))\n self.stats['episode_length'].append(\n self.episode_steps.get(agent_id, 0))\n self.cumulative_rewards[agent_id] = 0\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n self.stats['intrinsic_reward'].append(\n self.intrinsic_rewards.get(agent_id, 0))\n self.intrinsic_rewards[agent_id] = 0\n\n def end_episode(self):\n \"\"\"\n A signal that the Episode has ended. The buffer must be reset. 
\n Get only called when the academy resets.\n \"\"\"\n self.training_buffer.reset_all()\n for agent_id in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n for agent_id in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n\n def is_ready_update(self):\n \"\"\"\n Returns whether or not the trainer has enough elements to run update model\n :return: A boolean corresponding to whether or not update_model() can be run\n \"\"\"\n size_of_buffer = len(self.training_buffer.update_buffer['actions'])\n return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.sequence_length), 1)\n\n def update_model(self):\n \"\"\"\n Uses training_buffer to update model.\n \"\"\"\n n_sequences = max(int(self.trainer_parameters['batch_size'] / self.sequence_length), 1)\n value_total, policy_total, forward_total, inverse_total = [], [], [], []\n advantages = self.training_buffer.update_buffer['advantages'].get_batch()\n self.training_buffer.update_buffer['advantages'].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10))\n num_epoch = self.trainer_parameters['num_epoch']\n for k in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n feed_dict = {self.model.batch_size: n_sequences,\n self.model.sequence_length: self.sequence_length,\n self.model.mask_input: np.array(buffer['masks'][start:end]).flatten(),\n self.model.returns_holder: np.array(buffer['discounted_returns'][start:end]).flatten(),\n self.model.old_value: np.array(buffer['value_estimates'][start:end]).flatten(),\n self.model.advantage: np.array(buffer['advantages'][start:end]).reshape([-1, 1]),\n self.model.all_old_probs: np.array(buffer['action_probs'][start:end]).reshape(\n [-1, self.brain.vector_action_space_size])}\n if self.is_continuous_action:\n feed_dict[self.model.output_pre] = np.array(buffer['actions_pre'][start:end]).reshape(\n [-1, self.brain.vector_action_space_size])\n else:\n feed_dict[self.model.action_holder] = np.array(buffer['actions'][start:end]).flatten()\n if self.use_recurrent:\n feed_dict[self.model.prev_action] = np.array(buffer['prev_action'][start:end]).flatten()\n if self.use_vector_obs:\n if self.is_continuous_observation:\n total_observation_length = self.brain.vector_observation_space_size * \\\n self.brain.num_stacked_vector_observations\n feed_dict[self.model.vector_in] = np.array(buffer['vector_obs'][start:end]).reshape(\n [-1, total_observation_length])\n if self.use_curiosity:\n feed_dict[self.model.next_vector_in] = np.array(buffer['next_vector_in'][start:end]) \\\n .reshape([-1, total_observation_length])\n else:\n feed_dict[self.model.vector_in] = np.array(buffer['vector_obs'][start:end]).reshape(\n [-1, self.brain.num_stacked_vector_observations])\n if self.use_curiosity:\n feed_dict[self.model.next_vector_in] = np.array(buffer['next_vector_in'][start:end]) \\\n .reshape([-1, self.brain.num_stacked_vector_observations])\n if self.use_visual_obs:\n for i, _ in enumerate(self.model.visual_in):\n _obs = np.array(buffer['visual_obs%d' % i][start:end])\n if self.sequence_length > 1 and self.use_recurrent:\n (_batch, _seq, _w, _h, _c) = _obs.shape\n feed_dict[self.model.visual_in[i]] = _obs.reshape([-1, _w, _h, _c])\n else:\n feed_dict[self.model.visual_in[i]] = 
_obs\n if self.use_curiosity:\n for i, _ in enumerate(self.model.visual_in):\n _obs = np.array(buffer['next_visual_obs%d' % i][start:end])\n if self.sequence_length > 1 and self.use_recurrent:\n (_batch, _seq, _w, _h, _c) = _obs.shape\n feed_dict[self.model.next_visual_in[i]] = _obs.reshape([-1, _w, _h, _c])\n else:\n feed_dict[self.model.next_visual_in[i]] = _obs\n if self.use_recurrent:\n mem_in = np.array(buffer['memory'][start:end])[:, 0, :]\n feed_dict[self.model.memory_in] = mem_in\n\n run_list = [self.model.value_loss, self.model.policy_loss, self.model.update_batch]\n if self.use_curiosity:\n run_list.extend([self.model.forward_loss, self.model.inverse_loss])\n values = self.sess.run(run_list, feed_dict=feed_dict)\n self.has_updated = True\n run_out = dict(zip(run_list, values))\n value_total.append(run_out[self.model.value_loss])\n policy_total.append(np.abs(run_out[self.model.policy_loss]))\n if self.use_curiosity:\n inverse_total.append(run_out[self.model.inverse_loss])\n forward_total.append(run_out[self.model.forward_loss])\n self.stats['value_loss'].append(np.mean(value_total))\n self.stats['policy_loss'].append(np.mean(policy_total))\n if self.use_curiosity:\n self.stats['forward_loss'].append(np.mean(forward_total))\n self.stats['inverse_loss'].append(np.mean(inverse_total))\n self.training_buffer.reset_update_buffer()\n\n\ndef discount_rewards(r, gamma=0.99, value_next=0.0):\n \"\"\"\n Computes discounted sum of future rewards for use in updating value estimate.\n :param r: List of rewards.\n :param gamma: Discount factor.\n :param value_next: T+1 value estimate for returns calculation.\n :return: discounted sum of future rewards as list.\n \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = value_next\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n\ndef get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n \"\"\"\n Computes generalized advantage estimate for use in updating policy.\n :param rewards: list of rewards for time-steps t to T.\n :param value_next: Value estimate for time-step T+1.\n :param value_estimates: list of value estimates for time-steps t to T.\n :param gamma: Discount factor.\n :param lambd: GAE weighing factor.\n :return: list of advantage estimates for time-steps t to T.\n \"\"\"\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage\n" ]
[ [ "tensorflow.summary.FileWriter", "numpy.abs", "tensorflow.set_random_seed", "numpy.zeros_like", "numpy.mean", "tensorflow.variable_scope", "numpy.array" ] ]
samukie/fml-project-2021
[ "20a424cca97cf8df6fb7c2ff4b41ce834031077d" ]
[ "bomberman_rl/agent_code/rule_based_agent/callbacks.py" ]
[ "from collections import deque\nfrom random import shuffle\n\nimport numpy as np\n\n\ndef look_for_targets(free_space, start, targets, logger=None):\n \"\"\"Find direction of closest target that can be reached via free tiles.\n\n Performs a breadth-first search of the reachable free tiles until a target is encountered.\n If no target can be reached, the path that takes the agent closest to any target is chosen.\n\n Args:\n free_space: Boolean numpy array. True for free tiles and False for obstacles.\n start: the coordinate from which to begin the search.\n targets: list or array holding the coordinates of all target tiles.\n logger: optional logger object for debugging.\n Returns:\n coordinate of first step towards closest target or towards tile closest to any target.\n \"\"\"\n if len(targets) == 0: return None\n\n frontier = [start]\n parent_dict = {start: start}\n dist_so_far = {start: 0}\n best = start\n best_dist = np.sum(np.abs(np.subtract(targets, start)), axis=1).min()\n\n while len(frontier) > 0:\n current = frontier.pop(0)\n # Find distance from current position to all targets, track closest\n d = np.sum(np.abs(np.subtract(targets, current)), axis=1).min()\n if d + dist_so_far[current] <= best_dist:\n best = current\n best_dist = d + dist_so_far[current]\n if d == 0:\n # Found path to a target's exact position, mission accomplished!\n best = current\n break\n # Add unexplored free neighboring tiles to the queue in a random order\n x, y = current\n neighbors = [(x, y) for (x, y) in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)] if free_space[x, y]]\n shuffle(neighbors)\n for neighbor in neighbors:\n if neighbor not in parent_dict:\n frontier.append(neighbor)\n parent_dict[neighbor] = current\n dist_so_far[neighbor] = dist_so_far[current] + 1\n if logger: logger.debug(f'Suitable target found at {best}')\n # Determine the first step towards the best found target tile\n current = best\n while True:\n if parent_dict[current] == start: return current\n current = parent_dict[current]\n\n\ndef setup(self):\n \"\"\"Called once before a set of games to initialize data structures etc.\n\n The 'self' object passed to this method will be the same in all other\n callback methods. You can assign new properties (like bomb_history below)\n here or later on and they will be persistent even across multiple games.\n You can also use the self.logger object at any time to write to the log\n file for debugging (see https://docs.python.org/3.7/library/logging.html).\n \"\"\"\n self.logger.debug('Successfully entered setup code')\n np.random.seed()\n # Fixed length FIFO queues to avoid repeating the same actions\n self.bomb_history = deque([], 5)\n self.coordinate_history = deque([], 20)\n # While this timer is positive, agent will not hunt/attack opponents\n self.ignore_others_timer = 0\n\n\ndef act(self, game_state):\n \"\"\"\n Called each game step to determine the agent's next action.\n\n You can find out about the state of the game environment via game_state,\n which is a dictionary. 
Consult 'get_state_for_agent' in environment.py to see\n what it contains.\n \"\"\"\n self.logger.info('Picking action according to rule set')\n\n # Gather information about the game state\n arena = game_state['field']\n _, score, bombs_left, (x, y) = game_state['self']\n bombs = game_state['bombs']\n bomb_xys = [xy for (xy, t) in bombs]\n others = [xy for (n, s, b, xy) in game_state['others']]\n coins = game_state['coins']\n bomb_map = np.ones(arena.shape) * 5\n for (xb, yb), t in bombs:\n for (i, j) in [(xb + h, yb) for h in range(-3, 4)] + [(xb, yb + h) for h in range(-3, 4)]:\n if (0 < i < bomb_map.shape[0]) and (0 < j < bomb_map.shape[1]):\n bomb_map[i, j] = min(bomb_map[i, j], t)\n\n # If agent has been in the same location three times recently, it's a loop\n if self.coordinate_history.count((x, y)) > 2:\n self.ignore_others_timer = 5\n else:\n self.ignore_others_timer -= 1\n self.coordinate_history.append((x, y))\n\n # Check which moves make sense at all\n directions = [(x, y), (x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]\n valid_tiles, valid_actions = [], []\n for d in directions:\n if ((arena[d] == 0) and\n (game_state['explosion_map'][d] <= 1) and\n (bomb_map[d] > 0) and\n (not d in others) and\n (not d in bomb_xys)):\n valid_tiles.append(d)\n if (x - 1, y) in valid_tiles: valid_actions.append('LEFT')\n if (x + 1, y) in valid_tiles: valid_actions.append('RIGHT')\n if (x, y - 1) in valid_tiles: valid_actions.append('UP')\n if (x, y + 1) in valid_tiles: valid_actions.append('DOWN')\n if (x, y) in valid_tiles: valid_actions.append('WAIT')\n # Disallow the BOMB action if agent dropped a bomb in the same spot recently\n if (bombs_left > 0) and (x, y) not in self.bomb_history: valid_actions.append('BOMB')\n self.logger.debug(f'Valid actions: {valid_actions}')\n\n # Collect basic action proposals in a queue\n # Later on, the last added action that is also valid will be chosen\n action_ideas = ['UP', 'DOWN', 'LEFT', 'RIGHT']\n shuffle(action_ideas)\n\n # Compile a list of 'targets' the agent should head towards\n dead_ends = [(x, y) for x in range(1, 16) for y in range(1, 16) if (arena[x, y] == 0)\n and ([arena[x + 1, y], arena[x - 1, y], arena[x, y + 1], arena[x, y - 1]].count(0) == 1)]\n crates = [(x, y) for x in range(1, 16) for y in range(1, 16) if (arena[x, y] == 1)]\n targets = coins + dead_ends + crates\n # Add other agents as targets if in hunting mode or no crates/coins left\n if self.ignore_others_timer <= 0 or (len(crates) + len(coins) == 0):\n targets.extend(others)\n\n # Exclude targets that are currently occupied by a bomb\n targets = [targets[i] for i in range(len(targets)) if targets[i] not in bomb_xys]\n\n # Take a step towards the most immediately interesting target\n free_space = arena == 0\n if self.ignore_others_timer > 0:\n for o in others:\n free_space[o] = False\n d = look_for_targets(free_space, (x, y), targets, self.logger)\n if d == (x, y - 1): action_ideas.append('UP')\n if d == (x, y + 1): action_ideas.append('DOWN')\n if d == (x - 1, y): action_ideas.append('LEFT')\n if d == (x + 1, y): action_ideas.append('RIGHT')\n if d is None:\n self.logger.debug('All targets gone, nothing to do anymore')\n action_ideas.append('WAIT')\n\n # Add proposal to drop a bomb if at dead end\n if (x, y) in dead_ends:\n action_ideas.append('BOMB')\n # Add proposal to drop a bomb if touching an opponent\n if len(others) > 0:\n if (min(abs(xy[0] - x) + abs(xy[1] - y) for xy in others)) <= 1:\n action_ideas.append('BOMB')\n # Add proposal to drop a bomb if arrived at target and 
touching crate\n if d == (x, y) and ([arena[x + 1, y], arena[x - 1, y], arena[x, y + 1], arena[x, y - 1]].count(1) > 0):\n action_ideas.append('BOMB')\n\n # Add proposal to run away from any nearby bomb about to blow\n for (xb, yb), t in bombs:\n if (xb == x) and (abs(yb - y) < 4):\n # Run away\n if (yb > y): action_ideas.append('UP')\n if (yb < y): action_ideas.append('DOWN')\n # If possible, turn a corner\n action_ideas.append('LEFT')\n action_ideas.append('RIGHT')\n if (yb == y) and (abs(xb - x) < 4):\n # Run away\n if (xb > x): action_ideas.append('LEFT')\n if (xb < x): action_ideas.append('RIGHT')\n # If possible, turn a corner\n action_ideas.append('UP')\n action_ideas.append('DOWN')\n # Try random direction if directly on top of a bomb\n for (xb, yb), t in bombs:\n if xb == x and yb == y:\n action_ideas.extend(action_ideas[:4])\n\n # Pick last action added to the proposals list that is also valid\n while len(action_ideas) > 0:\n a = action_ideas.pop()\n if a in valid_actions:\n # Keep track of chosen action for cycle detection\n if a == 'BOMB':\n self.bomb_history.append((x, y))\n\n return a\n" ]
[ [ "numpy.subtract", "numpy.random.seed", "numpy.ones" ] ]
aroongta/Pedestrian_Trajectory_Prediction
[ "de368ef502391bbc87100314f96a3ab1f6b9959a" ]
[ "RNN/data/lstm_prototype_v1.py" ]
[ "# definition vanilla LSTM network233\n\n# import relevant libraries\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib\nimport numpy as np\nimport trajectories\nimport loader\nimport argparse\nimport gc\nimport logging\nimport os\nimport sys\nimport time\nimport matplotlib.pyplot as plt \n\n# build argparser\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--input_size', type=int, default=2)\nparser.add_argument('--output_size', type=int, default=5)\n# RNN size parameter (dimension of the output/hidden state)\nparser.add_argument('--rnn_size', type=int, default=128,\n help='size of RNN hidden state')\n# Size of each batch parameter\nparser.add_argument('--batch_size', type=int, default=10,\n help='minibatch size')\n# Length of sequence to be considered parameter\nparser.add_argument('--seq_length', type=int, default=20,\n help='RNN sequence length')\nparser.add_argument('--pred_length', type=int, default=12,\n help='prediction length')\n# Number of epochs parameter\nparser.add_argument('--num_epochs', type=int, default=30,\n help='number of epochs')\n# Frequency at which the model should be saved parameter\nparser.add_argument('--save_every', type=int, default=400,\n help='save frequency')\n# TODO: (resolve) Clipping gradients for now. No idea whether we should\n# Gradient value at which it should be clipped\nparser.add_argument('--grad_clip', type=float, default=10.,\n help='clip gradients at this value')\n# Learning rate parameter\nparser.add_argument('--learning_rate', type=float, default=0.003,\n help='learning rate')\n# Decay rate for the learning rate parameter\nparser.add_argument('--decay_rate', type=float, default=0.95,\n help='decay rate for rmsprop')\n# Dropout not implemented.\n# Dropout probability parameter\nparser.add_argument('--dropout', type=float, default=0.5,\n help='dropout probability')\n# Dimension of the embeddings parameter\nparser.add_argument('--embedding_size', type=int, default=64,\n help='Embedding dimension for the spatial coordinates')\n# Size of neighborhood to be considered parameter\nparser.add_argument('--neighborhood_size', type=int, default=32,\n help='Neighborhood size to be considered for social grid')\n# Size of the social grid parameter\nparser.add_argument('--grid_size', type=int, default=4,\n help='Grid size of the social grid')\n# Maximum number of pedestrians to be considered\nparser.add_argument('--maxNumPeds', type=int, default=27,\n help='Maximum Number of Pedestrians')\n\n# Lambda regularization parameter (L2)\nparser.add_argument('--lambda_param', type=float, default=0.0005,\n help='L2 regularization parameter')\n# Cuda parameter\nparser.add_argument('--use_cuda', action=\"store_true\", default=False,\n help='Use GPU or not')\n# GRU parameter\nparser.add_argument('--gru', action=\"store_true\", default=False,\n help='True : GRU cell, False: LSTM cell')\n# drive option\nparser.add_argument('--drive', action=\"store_true\", default=False,\n help='Use Google drive or not')\n# number of validation will be used\nparser.add_argument('--num_validation', type=int, default=2,\n help='Total number of validation dataset for validate accuracy')\n# frequency of validation\nparser.add_argument('--freq_validation', type=int, default=1,\n help='Frequency number(epoch) of validation using validation data')\n# frequency of optimizer learning decay\nparser.add_argument('--freq_optimizer', type=int, default=8,\n help='Frequency number(epoch) of learning decay for optimizer')\n# store grids in epoch 0 and use 
further.2 times faster -> Intensive memory use around 12 GB\nparser.add_argument('--grid', action=\"store_true\", default=True,\n help='Whether store grids and use further epoch')\n\n# Dataset options\nparser.add_argument('--dataset_name', default='zara1', type=str)\nparser.add_argument('--delim', default='\\t')\nparser.add_argument('--loader_num_workers', default=4, type=int)\nparser.add_argument('--obs_len', default=8, type=int)\nparser.add_argument('--pred_len', default=12, type=int)\nparser.add_argument('--skip', default=1, type=int)\n\nargs = parser.parse_args()\n\ndata_dir = \"/home/roongtaaahsih/ped_traj/sgan_ab/scripts/datasets/eth/train\"\n\n\n\"\"\" Class for defining the Vanilla LSTM Network \"\"\"\nclass VanillaLSTMNet(nn.Module):\n def __init__(self):\n \"\"\"\" Initialize the network here. You can use a combination of nn.LSTMCell and nn.Linear. \n Number of layers and hidden size is up to you. Hint: A network with less than 3 layers and \n 64 dimensionality should suffice.\n \"\"\"\n super(VanillaLSTMNet, self).__init__()\n \n # Inputs to the LSTMCell's are (input, (h_0, c_0)):\n # 1. input of shape (batch, input_size): tensor containing input \n # features\n # 2a. h_0 of shape (batch, hidden_size): tensor containing the \n # initial hidden state for each element in the batch.\n # 2b. c_0 of shape (batch, hidden_size): tensor containing the \n # initial cell state for each element in the batch.\n \n # Outputs: h_1, c_1\n # 1. h_1 of shape (batch, hidden_size): tensor containing the next \n # hidden state for each element in the batch\n # 2. c_1 of shape (batch, hidden_size): tensor containing the next \n # cell state for each element in the batch\n \n # set parameters for network architecture\n self.embedding_size = 64\n self.input_size = 2\n self.output_size = 2\n self.dropout_prob = 0.5 \n \n # linear layer to embed the input position\n self.input_embedding_layer = nn.Linear(self.input_size, self.embedding_size)\n \n # define lstm cell\n self.lstm_cell = nn.LSTMCell(self.embedding_size, self.embedding_size)\n\n # linear layer to map the hidden state of LSTM to output\n self.output_layer = nn.Linear(self.embedding_size, self.output_size)\n \n # ReLU and dropout unit\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(self.dropout_prob)\n \n pass\n \n def forward(self, observed_batch, pred_len = 0):\n \"\"\" This function takes the input sequence and predicts the output sequence. 
\n \n args:\n observed_seq (torch.Tensor) : Input sequence with shape <batch size x sequence length x number of dimensions>\n pred_len (int) : Length of the sequence to be predicted.\n\n \"\"\"\n \n '''\n Forward pass for the model.\n '''\n \n output_seq = []\n\n ht = torch.zeros(observed_batch.size(1), self.embedding_size, dtype=torch.float)\n ct = torch.zeros(observed_batch.size(1), self.embedding_size, dtype=torch.float)\n\n seq, peds, coords = observed_batch.shape\n #Feeding the observed trajectory to the network\n for step in range(seq):\n observed_step = observed_batch[step, :, :]\n lin_out = self.input_embedding_layer(observed_step.view(peds,2))\n ht, ct = self.lstm_cell(lin_out, (ht, ct))\n out = self.output_layer(self.dropout(ht))\n\n print(\"out's shape:\", out.shape)\n #Getting the predicted trajectory from the pedestrian \n for i in range(pred_len):\n lin_out = self.input_embedding_layer(out)\n ht, ct = self.lstm_cell(lin_out, (ht,ct))\n out = self.output_layer(self.dropout(ht))\n output_seq += [out]\n\n output_seq = torch.stack(output_seq).squeeze() # convert list to tensor\n return output_seq\n\n# Defining test function to return avg test loss for each epoch\ndef test(vanilla_lstm_net,args):\n test_data_dir = \"/home/roongtaaahsih/ped_traj/sgan_ab/scripts/datasets/eth/test\"\n\n # retrieve dataloader\n dataset, dataloader = loader.data_loader(args, test_data_dir)\n\n # define parameters for training and testing loops\n pred_len = 12\n criterion = nn.MSELoss() # MSE works best for difference between predicted and actual coordinate paths\n\n # initialize lists for capturing losses\n test_loss = []\n\n # now, test the model\n for i, batch in enumerate(dataloader):\n test_observed_batch = batch[0]\n test_target_batch = batch[1]\n out = vanilla_lstm_net(test_observed_batch, pred_len=pred_len) # forward pass of lstm network for training\n print(\"out's shape:\", out.shape)\n cur_test_loss = criterion(out, test_target_batch) # calculate MSE loss\n print('Current test loss: {}'.format(cur_test_loss.item())) # print current test loss\n test_loss.append(cur_test_loss.item())\n avg_testloss = sum(test_loss)/len(test_loss)\n print(\"============= Average test loss:\", avg_testloss, \"====================\")\n\n return avg_testloss\n\ndef main(args):\n \n # define parameters for training and testing loops\n num_epoch = 5\n # pred_freq = 1\n pred_len = 12\n learning_rate = 0.0005\n \n # get data\n # train_input, train_target, test_input, test_target = getData()\n dataset, dataloader = loader.data_loader(args, data_dir)\n\n # define the network and criterion\n vanilla_lstm_net = VanillaLSTMNet()\n # vanilla_lstm_net.double() # casts tensor to double\n criterion = nn.MSELoss() # MSE works best for difference between predicted and actual coordinate paths\n # define the optimizer\n optimizer = optim.Adam(vanilla_lstm_net.parameters(), lr=learning_rate)\n\n # initialize lists for capturing losses\n train_loss = []\n test_loss = []\n avg_train_loss = []\n avg_test_loss = []\n train_avgD_error=[]\n train_finalD_error=[]\n avg_train_avgD_error=[]\n avg_train_finalD_error=[]\n test_finalD_error=[]\n tets_avgD_error=[]\n std_train_loss = []\n std_test_loss = []\n\n '''train for 'num_epoch' epochs and test every 'pred_freq' epochs & when predicting use pred_len=6'''\n \n ### TRAINING FUNCTION ###\n for i in range(num_epoch):\n print('======================= Epoch: {cur_epoch} / {total_epochs} ======================='.format(cur_epoch=i, total_epochs=num_epoch))\n def closure():\n for i, batch in 
enumerate(dataloader):\n # print(\"batch length:\", len(batch)) # DEBUG\n train_batch = batch[0]\n target_batch = batch[1]\n print(\"train_batch's shape\", train_batch.shape)\n print(\"target_batch's shape\", target_batch.shape)\n\n seq, peds, coords = train_batch.shape # q is number of pedestrians\n\n #forward pass\n out = vanilla_lstm_net(train_batch, pred_len=pred_len) # forward pass of lstm network for training\n print(\"out's shape:\", out.shape)\n optimizer.zero_grad() # zero out gradients\n cur_train_loss = criterion(out, target_batch) # calculate MSE loss\n print('Current training loss: {}'.format(cur_train_loss.item())) # print current training loss\n #calculating average deisplacement error\n out1=out\n target_batch1=target_batch #making a copy of the tensors to convert them to array\n avgD_error=(np.sum(np.sqrt(np.square(out1[:,:,0].detach().numpy()-target_batch1[:,:,0].detach().numpy())+\n np.square(out1[:,:,1].detach().numpy()-target_batch1[:,:,1].detach().numpy()))))/(pred_len*peds)\n train_avgD_error.append(avgD_error)\n print(\"current avg Disp error:\",avgD_error)\n #calculating final displacement error\n finalD_error=(np.sum(np.sqrt(np.square(out1[pred_len-1,:,0].detach().numpy()-target_batch1[pred_len-1,:,0].detach().numpy())+\n np.square(out1[pred_len-1,:,1].detach().numpy()-target_batch1[pred_len-1,:,1].detach().numpy()))))/peds\n train_finalD_error.append(finalD_error)\n print(\"current final displacement error:\",finalD_error)\n\n train_loss.append(cur_train_loss.item())\n cur_train_loss.backward() # backward prop\n optimizer.step() # step like a mini-batch (after all pedestrians)\n ### end prototyping ###\n\n return cur_train_loss\n optimizer.step(closure) # update weights\n\n # save model at every epoch (uncomment) \n # torch.save(vanilla_lstm_net, './saved_models/vanilla_lstm_model_lr0005.pt')\n # print(\"Saved vanilla_lstm_net!\")\n avg_train_loss.append(np.sum(train_loss)/len(train_loss))\n avg_train_avgD_error.append(np.sum(train_avgD_error)/len(train_avgD_error))\n avg_train_finalD_error.append(np.sum(train_finalD_error)/len(train_finalD_error)) \n std_train_loss.append(np.std(np.asarray(train_loss)))\n train_loss = [] # empty train loss\n\n print(\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\")\n print(\"average train loss: {}\".format(avg_train_loss))\n print(\"average std loss: {}\".format(std_train_loss))\n avg_test_loss.append(test(vanilla_lstm_net,args)) ##calliing test function to return avg test loss at each epoch\n\n\n # all epochs have ended, so now save your model\n torch.save(vanilla_lstm_net, './saved_models/Vlstm_model_lr0005_ep5.pt')\n print(\"Saved vanilla_lstm_net!\" + './saved_models/Vlstm_model_lr0005_ep5.pt')\n \n ''' visualize losses vs. 
epoch'''\n\n plt.figure() # new figure\n plt.title(\"Average train & test loss v/s epoch {} epochs\".format(num_epoch))\n plt.plot(avg_train_loss,label='avg train_loss') \n plt.plot(avg_test_loss,color='red',label='avg test_loss')\n plt.legend()\n plt.show(block=True)\n # plt.show()\n\n\n plt.figure() # new figure\n plt.title(\"Average and final displacement error {} epochs\".format(num_epoch))\n plt.plot(avg_train_finalD_error,label='final displacement error') \n plt.plot(avg_train_avgD_error,color='red',label='avg displacement error')\n plt.legend()\n plt.show(block=True)\n\n #visualizing std deviation v/s epoch\n plt.figure()\n plt.title(\"Std of train loss vs epoch\")\n plt.plot(std_train_loss)\n plt.show(block=True)\n # plt.show()\n\n# main function\n\nif __name__ == '__main__':\n main(args)\n\n" ]
[ [ "matplotlib.pyplot.legend", "torch.nn.Dropout", "numpy.sum", "matplotlib.pyplot.title", "numpy.asarray", "matplotlib.pyplot.plot", "torch.nn.Linear", "torch.nn.LSTMCell", "torch.save", "torch.stack", "torch.nn.ReLU", "matplotlib.pyplot.show", "torch.nn.MSELoss", "matplotlib.pyplot.figure" ] ]
modi975/multilayer-perceptron
[ "4cfc9bd79ec145f6ecd2c5f1c12e9df7156e5e70" ]
[ "utils.py" ]
[ "# Copyright 2017 Abien Fred Agarap\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\n\n\"\"\"Utility functions for data handling\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n__version__ = '0.1.0'\n__author__ = 'Abien Fred Agarap'\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom sklearn.metrics import confusion_matrix\nimport tensorflow as tf\n\n\ndef list_files(path):\n \"\"\"Returns a list of files\n\n Parameter\n ---------\n path : str\n A string consisting of a path containing files.\n\n Returns\n -------\n file_list : list\n A list of the files present in the given directory\n\n Examples\n --------\n >>> PATH = '/home/data'\n >>> list_files(PATH)\n >>> ['/home/data/file1', '/home/data/file2', '/home/data/file3']\n \"\"\"\n\n file_list = []\n for (dir_path, dir_names, file_names) in os.walk(path):\n file_list.extend(os.path.join(dir_path, filename) for filename in file_names)\n return file_list\n\n\ndef plot_confusion_matrix(phase, path, class_names):\n \"\"\"Plots the confusion matrix using matplotlib.\n\n Parameter\n ---------\n phase : str\n String value indicating for what phase is the confusion matrix, i.e. 
training/validation/testing\n path : str\n Directory where the predicted and actual label NPY files reside\n class_names : str\n List consisting of the class names for the labels\n\n Returns\n -------\n conf : array, shape = [num_classes, num_classes]\n Confusion matrix\n accuracy : float\n Predictive accuracy\n \"\"\"\n\n # list all the results files\n files = list_files(path=path)\n\n labels = np.array([])\n\n for file in files:\n labels_batch = np.load(file)\n labels = np.append(labels, labels_batch)\n\n if (files.index(file) / files.__len__()) % 0.2 == 0:\n print('Done appending {}% of {}'.format((files.index(file) / files.__len__()) * 100, files.__len__()))\n\n labels = np.reshape(labels, newshape=(labels.shape[0] // 4, 4))\n\n print('Done appending NPY files.')\n\n # get the predicted labels\n predictions = labels[:, :2]\n\n # get the actual labels\n actual = labels[:, 2:]\n\n # create a TensorFlow session\n with tf.Session() as sess:\n\n # decode the one-hot encoded labels to single integer\n predictions = sess.run(tf.argmax(predictions, 1))\n actual = sess.run(tf.argmax(actual, 1))\n\n # get the confusion matrix based on the actual and predicted labels\n conf = confusion_matrix(y_true=actual, y_pred=predictions)\n\n # create a confusion matrix plot\n plt.imshow(conf, cmap=plt.cm.Purples, interpolation='nearest')\n\n # set the plot title\n plt.title('Confusion Matrix for {} Phase'.format(phase))\n\n # legend of intensity for the plot\n plt.colorbar()\n\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n plt.tight_layout()\n plt.ylabel('Actual label')\n plt.xlabel('Predicted label')\n\n # show the plot\n plt.show()\n\n # get the accuracy of the phase\n accuracy = (conf[0][0] + conf[1][1]) / labels.shape[0]\n\n # return the confusion matrix and the accuracy\n return conf, accuracy\n\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "numpy.reshape", "numpy.load", "tensorflow.argmax", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.colorbar", "numpy.append", "tensorflow.Session", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
QiJune/elasticdl
[ "6b01f5b32fd757badff96ed652662bd94afe9263", "6b01f5b32fd757badff96ed652662bd94afe9263" ]
[ "elasticdl/python/data/recordio_gen/heart_recordio_gen.py", "elasticdl/python/tests/embedding_test_module.py" ]
[ "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport pathlib\nimport sys\nimport urllib\n\nimport pandas as pd\nimport recordio\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\nURL = \"https://storage.googleapis.com/applied-dl/heart.csv\"\n\n\ndef convert_series_to_tf_feature(data_series, columns, dtype_series):\n \"\"\"\n Convert pandas series to TensorFlow features.\n Args:\n data_series: Pandas series of data content.\n columns: Column name array.\n dtype_series: Pandas series of dtypes.\n Return:\n A dict of feature name -> tf.train.Feature\n \"\"\"\n features = {}\n for column_name in columns:\n feature = None\n value = data_series[column_name]\n dtype = dtype_series[column_name]\n\n if dtype == \"int64\":\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[value])\n )\n elif dtype == \"float64\":\n feature = tf.train.Feature(\n float_list=tf.train.FloatList(value=[value])\n )\n elif dtype == \"str\":\n feature = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value.encode(\"utf-8\")])\n )\n elif dtype == \"object\":\n feature = tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[str(value).encode(\"utf-8\")]\n )\n )\n else:\n assert False, \"Unrecoginize dtype: {}\".format(dtype)\n\n features[column_name] = feature\n\n return features\n\n\ndef convert_to_recordio_files(data_frame, dir_name, records_per_shard):\n \"\"\"\n Convert a pandas DataFrame to recordio files.\n Args:\n data_frame: A pandas DataFrame to convert_to_recordio_files.\n dir_name: A directory to put the generated recordio files.\n records_per_shard: The record number per shard.\n \"\"\"\n pathlib.Path(dir_name).mkdir(parents=True, exist_ok=True)\n\n row_num = 0\n writer = None\n for index, row in data_frame.iterrows():\n if row_num % records_per_shard == 0:\n if writer:\n writer.close()\n\n shard = row_num // records_per_shard\n file_path_name = os.path.join(dir_name, \"data-%05d\" % shard)\n writer = recordio.Writer(file_path_name)\n\n feature = convert_series_to_tf_feature(\n row, data_frame.columns, data_frame.dtypes\n )\n result_string = tf.train.Example(\n features=tf.train.Features(feature=feature)\n ).SerializeToString()\n writer.write(result_string)\n\n row_num += 1\n\n if writer:\n writer.close()\n\n print(\"Finish data conversion in {}\".format(dir_name))\n\n\ndef load_raw_data(data_dir):\n file_name = os.path.basename(URL)\n file_path = os.path.join(data_dir, file_name)\n pathlib.Path(data_dir).mkdir(parents=True, exist_ok=True)\n if not os.path.exists(file_path):\n urllib.request.urlretrieve(URL, file_path)\n return pd.read_csv(file_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_dir\",\n help=\"The cache directory to put the data downloaded from the web\",\n )\n parser.add_argument(\n \"--records_per_shard\",\n type=int,\n default=128,\n help=\"Record number per shard\",\n )\n 
parser.add_argument(\n \"--output_dir\", help=\"The directory for the generated recordio files\"\n )\n\n args = parser.parse_args(sys.argv[1:])\n\n data_frame = load_raw_data(args.data_dir)\n\n train, test = train_test_split(data_frame, test_size=0.2)\n train, val = train_test_split(train, test_size=0.2)\n\n convert_to_recordio_files(\n train, os.path.join(args.output_dir, \"train\"), args.records_per_shard\n )\n convert_to_recordio_files(\n val, os.path.join(args.output_dir, \"val\"), args.records_per_shard\n )\n convert_to_recordio_files(\n test, os.path.join(args.output_dir, \"test\"), args.records_per_shard\n )\n", "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Concatenate, Dense, Flatten\n\nfrom elasticdl.python.elasticdl.layers.embedding import Embedding\n\n\nclass EdlEmbeddingModel(tf.keras.Model):\n def __init__(self, output_dim=16, weights=None):\n \"\"\"\n Arguments:\n output_dim: An Integer. It is the output dimension of embedding\n layers in `EdlEmbeddingModel`.\n weights: A numpy ndarray list. If `weights` is not None, dense\n layer initializes its weights using `weights`.\n \"\"\"\n super(EdlEmbeddingModel, self).__init__(name=\"EdlEmbeddingModel\")\n self.output_dim = output_dim\n if weights:\n if len(weights) != 2:\n raise ValueError(\n \"EdlEmbeddingModel constructor receives weights with \"\n \"length %d, expected %d\" % (len(weights), 2)\n )\n\n self.embedding_1 = Embedding(output_dim)\n self.embedding_2 = Embedding(output_dim)\n self.concat = Concatenate()\n self.dense = Dense(1, weights=weights)\n self.flatten = Flatten()\n\n def call(self, inputs, training=False):\n x = self.concat(\n [\n self.embedding_1(inputs[\"f1\"]),\n self.embedding_1(inputs[\"f2\"]),\n self.embedding_2(inputs[\"f3\"]),\n ]\n )\n x = self.flatten(x)\n x = self.dense(x)\n return x\n\n\n# The model structure of KerasEmbeddingModel should keep same with\n# EdlEmbeddingModel.\nclass KerasEmbeddingModel(tf.keras.Model):\n def __init__(self, input_dim, output_dim=16, weights=None):\n \"\"\"\n Arguments:\n input_dim: An Integer. It is the input dimension of embedding\n layers in `KerasEmbeddingModel`.\n output_dim: An Integer. It is the output dimension of embedding\n layers in `KerasEmbeddingModel`.\n weights: A numpy ndarray list. 
Unless `weights` is None, embedding\n layer and dense layer initialize their weights using `weights`.\n \"\"\"\n super(KerasEmbeddingModel, self).__init__(name=\"KerasEmbeddingModel\")\n self.output_dim = output_dim\n if weights:\n weight_1 = [weights[0]]\n weight_2 = [weights[1]]\n linear_weights = weights[2:]\n else:\n weight_1, weight_2, linear_weights = None, None, None\n self.embedding_1 = tf.keras.layers.Embedding(\n input_dim, output_dim, weights=weight_1\n )\n self.embedding_2 = tf.keras.layers.Embedding(\n input_dim, output_dim, weights=weight_2\n )\n self.concat = Concatenate()\n self.dense = Dense(1, weights=linear_weights)\n self.flatten = Flatten()\n\n def call(self, inputs, training=False):\n x = self.concat(\n [\n self.embedding_1(inputs[\"f1\"]),\n self.embedding_1(inputs[\"f2\"]),\n self.embedding_2(inputs[\"f3\"]),\n ]\n )\n x = self.flatten(x)\n x = self.dense(x)\n return x\n\n\ndef loss(labels, predictions):\n return tf.reduce_mean(tf.square(predictions - labels))\n\n\ndef dataset_fn(dataset, mode, metadata):\n def _parse_data(record):\n feature_description = {\n \"f1\": tf.io.FixedLenFeature([1], tf.int64),\n \"f2\": tf.io.FixedLenFeature([1], tf.int64),\n \"f3\": tf.io.FixedLenFeature([1], tf.int64),\n \"label\": tf.io.FixedLenFeature([1], tf.int64),\n }\n r = tf.io.parse_single_example(record, feature_description)\n return {\"f1\": r[\"f1\"], \"f2\": r[\"f2\"], \"f3\": r[\"f3\"]}, r[\"label\"]\n\n dataset = dataset.map(_parse_data)\n return dataset\n\n\ndef optimizer(lr=0.1):\n return tf.optimizers.SGD(lr)\n\n\ndef eval_metrics_fn(predictions, labels):\n return {\"mse\": tf.reduce_mean(tf.square(predictions - labels))}\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split", "tensorflow.train.Features", "tensorflow.train.FloatList", "tensorflow.train.Int64List" ], [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.io.parse_single_example", "tensorflow.io.FixedLenFeature", "tensorflow.optimizers.SGD", "tensorflow.square", "tensorflow.keras.layers.Flatten" ] ]
j3soon/tbparse
[ "4bd87404040fd85bdd72b89fefd21e9c6486d26a", "4bd87404040fd85bdd72b89fefd21e9c6486d26a" ]
[ "tests/test_summary_reader/test_tensor.py", "tests/test_summary_reader/test_edge_cases.py" ]
[ "import os\nimport tempfile\nfrom typing import List\n\nimport pytest\nimport tensorflow as tf\nfrom tbparse import SummaryReader\nfrom tensorboard.backend.event_processing.event_accumulator import TensorEvent\nfrom torch.utils.tensorboard import SummaryWriter\n\nN_RUNS = 3\nN_EVENTS = 5\n\[email protected]\ndef prepare(testdir):\n # Use torch for main tests, logs for tensorboard and tensorboardX are\n # generated in their own tests.\n # Ref: https://pytorch.org/docs/stable/tensorboard.html\n log_dir = os.path.join(testdir.tmpdir, 'run')\n for i in range(N_RUNS):\n writer = SummaryWriter(os.path.join(log_dir, f'run{i}'))\n for j in range(N_EVENTS):\n writer.add_scalar('y=2x+C', j * 2 + i, j, new_style=True)\n writer.add_scalar('y=3x+C', j * 3 + i, j, new_style=True)\n writer.close()\n \"\"\"\n run\n ├── run0\n │ └── events.out.tfevents.<id-1>\n ├── run1\n │ └── events.out.tfevents.<id-2>\n └── run2\n └── events.out.tfevents.<id-3>\n \"\"\"\n\ndef test_tensorboardX(prepare, testdir):\n pass\n # Note: tensorboardX doesn't support logging tensors.\n\ndef test_tensorflow(prepare, testdir):\n # Prepare Log\n log_dir_th = os.path.join(testdir.tmpdir, 'run')\n tmpdir_tf = tempfile.TemporaryDirectory()\n log_dir_tf = os.path.join(tmpdir_tf.name, 'run')\n for i in range(N_RUNS):\n writer = tf.summary.create_file_writer(os.path.join(log_dir_tf, f'run{i}'))\n writer.set_as_default()\n for j in range(N_EVENTS):\n tf.summary.scalar('y=2x+C', j * 2 + i, j)\n tf.summary.scalar('y=3x+C', j * 3 + i, j)\n writer.close()\n # (default) Parse & Compare\n df_th = SummaryReader(log_dir_th).tensors\n df_tf = SummaryReader(log_dir_tf).tensors\n assert df_th.equals(df_tf)\n # (dir_name) Parse & Compare\n df_th = SummaryReader(log_dir_th, extra_columns={'dir_name'}).tensors\n df_tf = SummaryReader(log_dir_tf, extra_columns={'dir_name'}).tensors\n assert df_th.equals(df_tf)\n # (pivot) Parse & Compare\n df_th = SummaryReader(log_dir_th, pivot=True).tensors\n df_tf = SummaryReader(log_dir_tf, pivot=True).tensors\n assert df_th.equals(df_tf)\n # (pivot & dir_name) Parse & Compare\n df_th = SummaryReader(log_dir_th, pivot=True, extra_columns={'dir_name'}).tensors\n df_tf = SummaryReader(log_dir_tf, pivot=True, extra_columns={'dir_name'}).tensors\n assert df_th.equals(df_tf)\n\ndef get_tmpdir_info(tmpdir):\n log_dir = os.path.join(tmpdir, 'run')\n run_dir = os.path.join(log_dir, 'run0')\n dirs = os.listdir(run_dir)\n assert len(dirs) == 1\n event_filename = dirs[0]\n event_file = os.path.join(run_dir, event_filename)\n d = {\n 'log_dir': log_dir,\n 'run_dir': run_dir,\n 'event_file': event_file,\n 'event_filename': event_filename,\n }\n return d\n\ndef test_event_file_raw(prepare, testdir):\n tmpinfo = get_tmpdir_info(testdir.tmpdir)\n reader = SummaryReader(tmpinfo[\"event_file\"], pivot=True)\n # Test raw functions\n # - Test `raw_tags` and `get_raw_tags`\n assert reader.raw_tags == reader.get_raw_tags()\n assert reader.raw_tags['tensors'] == reader.get_raw_tags('tensors')\n assert set(reader.raw_tags['tensors']) == {'y=2x+C', 'y=3x+C'}\n # - Test `raw_events` and `get_raw_events`\n assert reader.raw_events == reader.get_raw_events()\n assert reader.raw_events['tensors'] == reader.get_raw_events('tensors')\n assert reader.raw_events['tensors']['y=2x+C'] == reader.get_raw_events('tensors', 'y=2x+C')\n # - Test raw event count & type\n events: List[TensorEvent] = reader.get_raw_events('tensors', 'y=2x+C')\n assert len(events) == N_EVENTS\n assert type(events[0]) == TensorEvent\n for i in range(N_EVENTS):\n value = 
tf.make_ndarray(events[i].tensor_proto)\n assert (events[i].step, value) == (i, i * 2)\n\ndef check_others(reader):\n assert len(reader.scalars) == 0\n assert len(reader.histograms) == 0\n assert len(reader.hparams) == 0\n\ndef test_event_file(prepare, testdir):\n tmpinfo = get_tmpdir_info(testdir.tmpdir)\n # Test pivot\n reader = SummaryReader(tmpinfo[\"event_file\"], pivot=True)\n assert reader.tensors.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C']\n assert reader.tensors['step'].to_list() == [i for i in range(N_EVENTS)]\n assert reader.tensors['y=2x+C'].to_list() == [i * 2 for i in range(N_EVENTS)]\n assert reader.tensors['y=3x+C'].to_list() == [i * 3 for i in range(N_EVENTS)]\n check_others(reader)\n # Test additional tag column\n reader = SummaryReader(tmpinfo[\"event_file\"])\n assert reader.tensors.columns.to_list() == ['step', 'tag', 'value']\n assert reader.tensors['step'].to_list()[:N_EVENTS] == [i for i in range(N_EVENTS)]\n assert reader.tensors['step'].to_list()[N_EVENTS:] == [i for i in range(N_EVENTS)]\n assert reader.tensors['tag'].to_list()[:N_EVENTS] == ['y=2x+C'] * N_EVENTS\n assert reader.tensors['tag'].to_list()[N_EVENTS:] == ['y=3x+C'] * N_EVENTS\n assert reader.tensors['value'].to_list()[:N_EVENTS] == [i * 2 for i in range(N_EVENTS)]\n assert reader.tensors['value'].to_list()[N_EVENTS:] == [i * 3 for i in range(N_EVENTS)]\n check_others(reader)\n # Test pivot & additional wall_time column\n reader = SummaryReader(tmpinfo[\"event_file\"], pivot=True, extra_columns={'wall_time'})\n assert reader.tensors.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C', 'wall_time']\n assert len(reader.tensors['wall_time']) == N_EVENTS\n check_others(reader)\n # Test pivot & additional dir_name column\n reader = SummaryReader(tmpinfo[\"event_file\"], pivot=True, extra_columns={'dir_name'})\n assert reader.tensors.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C', 'dir_name']\n assert reader.tensors['dir_name'].to_list() == [''] * N_EVENTS\n check_others(reader)\n # Test pivot & additional file_name column\n reader = SummaryReader(tmpinfo[\"event_file\"], pivot=True, extra_columns={'file_name'})\n assert reader.tensors.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C', 'file_name']\n assert reader.tensors['file_name'].to_list() == [tmpinfo[\"event_filename\"]] * N_EVENTS\n check_others(reader)\n # Test all columns\n reader = SummaryReader(tmpinfo[\"event_file\"], extra_columns={\n 'wall_time', 'dir_name', 'file_name'})\n assert reader.tensors.columns.to_list() == ['step', 'tag', 'value', 'wall_time', 'dir_name', 'file_name']\n assert reader.tensors['step'].to_list()[:N_EVENTS] == [i for i in range(N_EVENTS)]\n assert reader.tensors['step'].to_list()[N_EVENTS:] == [i for i in range(N_EVENTS)]\n assert reader.tensors['tag'].to_list()[:N_EVENTS] == ['y=2x+C'] * N_EVENTS\n assert reader.tensors['tag'].to_list()[N_EVENTS:] == ['y=3x+C'] * N_EVENTS\n assert reader.tensors['value'].to_list()[:N_EVENTS] == [i * 2 for i in range(N_EVENTS)]\n assert reader.tensors['value'].to_list()[N_EVENTS:] == [i * 3 for i in range(N_EVENTS)]\n assert len(reader.tensors['wall_time']) == N_EVENTS * 2\n assert reader.tensors['dir_name'].to_list() == [''] * (N_EVENTS * 2)\n assert reader.tensors['file_name'].to_list() == [tmpinfo[\"event_filename\"]] * (N_EVENTS * 2)\n check_others(reader)\n\ndef test_run_dir(prepare, testdir):\n tmpinfo = get_tmpdir_info(testdir.tmpdir)\n # Test pivot\n reader = SummaryReader(tmpinfo[\"run_dir\"], pivot=True, extra_columns={\n 'wall_time', 'dir_name', 'file_name'})\n assert 
len(reader.children) == 1\n assert reader.tensors.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C', 'wall_time', 'dir_name', 'file_name']\n assert reader.tensors['step'].to_list() == [i for i in range(N_EVENTS)]\n assert reader.tensors['y=2x+C'].to_list() == [i * 2 for i in range(N_EVENTS)]\n assert reader.tensors['y=3x+C'].to_list() == [i * 3 for i in range(N_EVENTS)]\n assert len(reader.tensors['wall_time']) == N_EVENTS\n assert len(reader.tensors['wall_time'][0]) == 2\n assert reader.tensors['dir_name'].to_list() == [''] * N_EVENTS\n assert reader.tensors['file_name'].to_list() == [tmpinfo[\"event_filename\"]] * N_EVENTS\n check_others(reader)\n # Test all columns\n reader = SummaryReader(tmpinfo[\"run_dir\"], extra_columns={\n 'wall_time', 'dir_name', 'file_name'})\n assert reader.tensors.columns.to_list() == ['step', 'tag', 'value', 'wall_time', 'dir_name', 'file_name']\n assert reader.tensors['step'].to_list()[:N_EVENTS] == [i for i in range(N_EVENTS)]\n assert reader.tensors['step'].to_list()[N_EVENTS:] == [i for i in range(N_EVENTS)]\n assert reader.tensors['tag'].to_list()[:N_EVENTS] == ['y=2x+C'] * N_EVENTS\n assert reader.tensors['tag'].to_list()[N_EVENTS:] == ['y=3x+C'] * N_EVENTS\n assert reader.tensors['value'].to_list()[:N_EVENTS] == [i * 2 for i in range(N_EVENTS)]\n assert reader.tensors['value'].to_list()[N_EVENTS:] == [i * 3 for i in range(N_EVENTS)]\n assert len(reader.tensors['wall_time']) == N_EVENTS * 2\n assert reader.tensors['dir_name'].to_list() == [''] * (N_EVENTS * 2)\n assert reader.tensors['file_name'].to_list() == [tmpinfo[\"event_filename\"]] * (N_EVENTS * 2)\n check_others(reader)\n\ndef test_log_dir(prepare, testdir):\n tmpinfo = get_tmpdir_info(testdir.tmpdir)\n # Test pivot\n reader = SummaryReader(tmpinfo[\"log_dir\"], pivot=True, extra_columns={\n 'dir_name', 'file_name'})\n assert len(reader.children) == N_RUNS\n assert reader.tensors.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C', 'dir_name', 'file_name']\n for i in range(N_RUNS):\n run_dir = os.path.join(tmpinfo[\"log_dir\"], f'run{i}')\n dirs = os.listdir(run_dir)\n assert len(dirs) == 1\n event_filename = dirs[0]\n s, e = i*N_EVENTS, (i+1)*N_EVENTS\n assert reader.tensors['step'][s:e].to_list() == [j for j in range(N_EVENTS)]\n assert reader.tensors['y=2x+C'][s:e].to_list() == [j * 2 + i for j in range(N_EVENTS)]\n assert reader.tensors['y=3x+C'][s:e].to_list() == [j * 3 + i for j in range(N_EVENTS)]\n assert reader.tensors['dir_name'][s:e].to_list() == [f'run{i}'] * N_EVENTS\n assert reader.tensors['file_name'][s:e].to_list() == [event_filename] * N_EVENTS\n check_others(reader)\n # Test all columns\n reader = SummaryReader(tmpinfo[\"log_dir\"], extra_columns={\n 'wall_time', 'dir_name', 'file_name'})\n assert reader.tensors.columns.to_list() == ['step', 'tag', 'value', 'wall_time', 'dir_name', 'file_name']\n for i in range(N_RUNS):\n run_dir = os.path.join(tmpinfo[\"log_dir\"], f'run{i}')\n dirs = os.listdir(run_dir)\n assert len(dirs) == 1\n event_filename = dirs[0]\n s1, e1 = i*(N_EVENTS*2), i*(N_EVENTS*2) + N_EVENTS\n s2, e2 = (i+1)*(N_EVENTS*2) - N_EVENTS, (i+1)*(N_EVENTS*2)\n assert reader.tensors['step'].to_list()[s1:e1] == [j for j in range(N_EVENTS)]\n assert reader.tensors['step'].to_list()[s2:e2] == [j for j in range(N_EVENTS)]\n assert reader.tensors['tag'].to_list()[s1:e1] == ['y=2x+C'] * N_EVENTS\n assert reader.tensors['tag'].to_list()[s2:e2] == ['y=3x+C'] * N_EVENTS\n assert reader.tensors['value'].to_list()[s1:e1] == [j * 2 + i for j in range(N_EVENTS)]\n assert 
reader.tensors['value'].to_list()[s2:e2] == [j * 3 + i for j in range(N_EVENTS)]\n assert len(reader.tensors['wall_time']) == N_RUNS * N_EVENTS * 2\n assert reader.tensors['dir_name'][s1:e2].to_list() == [f'run{i}'] * (N_EVENTS * 2)\n assert reader.tensors['file_name'][s1:e2].to_list() == [event_filename] * (N_EVENTS * 2)\n check_others(reader)\n", "import os\n\nimport pytest\nfrom tbparse import SummaryReader\nfrom torch.utils.tensorboard import SummaryWriter\n\nN_RUNS = 3\nN_EVENTS = 5\n\[email protected]\ndef prepare(testdir):\n # Ref: https://pytorch.org/docs/stable/tensorboard.html\n log_dir = os.path.join(testdir.tmpdir, 'run')\n for i in range(N_RUNS):\n run_dir = os.path.join(log_dir, f'run{i}')\n writer = SummaryWriter(run_dir)\n # writer.add_hparams({'name': 'test', 'run id': i}, {}, run_name='.')\n for j in range(N_EVENTS):\n writer.add_scalar('y=2x+C', j * 2 + i, j)\n writer.add_scalar('y=3x+C', j * 3 + i, j)\n # non event file\n with open(os.path.join(run_dir, 'temp.txt'), 'w') as file:\n file.write('temp')\n # empty directory\n os.mkdir(os.path.join(run_dir, 'tmpdir'))\n writer.close()\n temp_dir = os.path.join(testdir.tmpdir, 'temp')\n os.mkdir(temp_dir)\n with open(os.path.join(temp_dir, 'temp.txt'), 'w') as file:\n file.write('temp')\n \"\"\"\n run\n ├── run0\n │ └── events.out.tfevents.<id-1>\n ├── run1\n │ └── events.out.tfevents.<id-2>\n └── run2\n └── events.out.tfevents.<id-3>\n \"\"\"\n\ndef test_empty_dir(prepare, testdir):\n temp_dir = os.path.join(testdir.tmpdir, 'temp')\n reader = SummaryReader(temp_dir)\n assert reader.scalars.columns.to_list() == []\n\ndef test_event_file(prepare, testdir):\n log_dir = os.path.join(testdir.tmpdir, 'run')\n run_dir = os.path.join(log_dir, 'run0')\n dirs = sorted(os.listdir(run_dir))\n assert len(dirs) == 3\n event_filename = dirs[0]\n # Test pivot\n reader = SummaryReader(run_dir, pivot=True, extra_columns={\n 'wall_time', 'dir_name', 'file_name'})\n assert len(reader.children) == 3\n assert reader.scalars.columns.to_list() == ['step', 'y=2x+C', 'y=3x+C', 'wall_time', 'dir_name', 'file_name']\n assert reader.scalars['step'].to_list() == [i for i in range(N_EVENTS)]\n assert reader.scalars['y=2x+C'].to_list() == [i * 2 for i in range(N_EVENTS)]\n assert reader.scalars['y=3x+C'].to_list() == [i * 3 for i in range(N_EVENTS)]\n assert len(reader.scalars['wall_time']) == N_EVENTS\n assert len(reader.scalars['wall_time'][0]) == 2\n assert reader.scalars['dir_name'].to_list() == [''] * N_EVENTS\n assert reader.scalars['file_name'].to_list() == [event_filename] * N_EVENTS\n\ndef test_event_types(prepare, testdir):\n log_dir = os.path.join(testdir.tmpdir, 'run')\n run_dir = os.path.join(log_dir, 'run0')\n dirs = sorted(os.listdir(run_dir))\n assert len(dirs) == 3\n event_filename = dirs[0]\n event_file = os.path.join(run_dir, event_filename)\n # Test default\n reader = SummaryReader(event_file, event_types={'tensors'})\n assert reader.scalars.columns.to_list() == []\n\ndef test_get_tags(prepare, testdir):\n log_dir = os.path.join(testdir.tmpdir, 'run')\n run_dir = os.path.join(log_dir, 'run0')\n dirs = sorted(os.listdir(run_dir))\n assert len(dirs) == 3\n event_filename = dirs[0]\n event_file = os.path.join(run_dir, event_filename)\n # Test default\n reader = SummaryReader(event_file)\n assert reader.tags['scalars'] == ['y=2x+C', 'y=3x+C']\n assert reader.get_tags('scalars') == ['y=2x+C', 'y=3x+C']\n reader = SummaryReader(run_dir)\n assert reader.tags['scalars'] == ['y=2x+C', 'y=3x+C']\n assert reader.get_tags('scalars') == ['y=2x+C', 
'y=3x+C']\n\n# TODO: tags duplicate with file_name, dir_name, etc.\n# TODO: log single letter?\n# TODO: order difference when pd.concat\n" ]
[ [ "tensorflow.make_ndarray", "tensorflow.summary.scalar" ], [ "torch.utils.tensorboard.SummaryWriter" ] ]
sen-pai/Sub-Sequence-Segmentation
[ "2cc421d5c608149d93ad09f19599bfc681cc7850" ]
[ "both_noisy_training_script.py" ]
[ "import os, sys\nimport numpy as np\nimport random\nimport copy\nimport shutil\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\n\nplt.rc(\"font\", size=8)\n\nimport argparse\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torchvision.utils\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torch.optim.lr_scheduler import StepLR, CyclicLR\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, datasets, models\n\nfrom dataloader import NoisySineDataset, CleanSineDataset\nfrom models import RNNEncoder, RNNDecoder, Seq2SeqAttn\nfrom data_utils import pad_collate\n\n# comment out warnings if you are testing it out\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nparser = argparse.ArgumentParser(description=\"RNNAutoEncoder\")\n\nparser.add_argument(\n \"--save-freq\",\n type=int,\n default=5,\n help=\"every x epochs save weights\",\n)\nparser.add_argument(\n \"--batch\",\n type=int,\n default=5,\n help=\"train batch size\",\n)\nparser.add_argument(\n \"--vs\",\n type=float,\n default=0.05,\n help=\"val split\",\n)\nparser.add_argument(\n \"--lr\",\n type=float,\n default=0.0001,\n help=\"initial lr\",\n)\nparser.add_argument(\n \"--exp-name\",\n default=\"testing_nn_hq\",\n help=\"Experiment name\",\n)\nargs = parser.parse_args()\n\n\nimport wandb\n\nos.environ[\"WANDB_NAME\"] = args.exp_name\nwandb.init(project=\"sub_seq\")\nwandb.config.update(args)\nwandb.config.update({\"dataset\": \"noisy_sine_dataset\"})\n\n\n# fix random seeds\ntorch.manual_seed(1)\nnp.random.seed(1)\nrandom.seed(1)\n\nclean_dataset = NoisySineDataset()\nnoisy_dataset = NoisySineDataset()\n\nnoisy_dataloader = DataLoader(\n noisy_dataset,\n batch_size=10,\n shuffle=True,\n num_workers=0,\n drop_last=True,\n collate_fn=pad_collate,\n)\n\nclean_dataloader = DataLoader(\n clean_dataset,\n batch_size=10,\n shuffle=True,\n num_workers=0,\n drop_last=True,\n collate_fn=pad_collate,\n)\n\n\ndef calc_loss(\n prediction,\n target,\n metrics,\n):\n mse_loss = F.mse_loss(prediction, target)\n mae_loss = F.l1_loss(prediction, target)\n metrics[\"MSE\"] += mse_loss.data.cpu().numpy() * target.size(0)\n metrics[\"MAE\"] += mae_loss.data.cpu().numpy() * target.size(0)\n\n return mse_loss\n\n\ndef print_metrics(metrics, epoch_samples, epoch):\n outputs = []\n outputs.append(\"{}:\".format(str(epoch)))\n for k in metrics.keys():\n outputs.append(\"{}: {:4f}\".format(k, metrics[k] / epoch_samples))\n wandb.log({k: metrics[k] / epoch_samples})\n print(\"{}\".format(\", \".join(outputs)))\n\n\ndef train_model(model, optimizer, scheduler, num_epochs=25):\n for epoch in range(num_epochs):\n print(\"Epoch {}/{}\".format(epoch, num_epochs - 1))\n print(\"-\" * 10)\n metrics = defaultdict(float)\n epoch_samples = 0\n\n for ((_, noisy, noisy_lens), (_, clean, clean_lens)) in zip(\n tqdm(noisy_dataloader), clean_dataloader\n ):\n noisy = noisy.to(device)\n clean = clean.to(device)\n\n # forward\n optimizer.zero_grad()\n\n decoded_clean, all_attn = model(\n noisy, encoder_lens=noisy_lens, decoder_lens=clean_lens\n )\n\n loss = calc_loss(clean, decoded_clean, metrics)\n\n loss.backward()\n optimizer.step()\n\n epoch_samples += noisy.size(0)\n\n print_metrics(metrics, epoch_samples, epoch)\n # deep copy the model\n if epoch % args.save_freq == 0:\n print(\"saving model\")\n best_model_wts = copy.deepcopy(model.state_dict())\n weight_name = \"attn_sin_\" + str(epoch) + \".pt\"\n torch.save(best_model_wts, weight_name)\n\n\ndevice = 
torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ne = RNNEncoder(input_dim=1, bidirectional=True)\nd = RNNDecoder(\n input_dim=(e.input_size + e.hidden_size * 2),\n hidden_size=e.hidden_size,\n bidirectional=True,\n)\n\nmodel = Seq2SeqAttn(encoder=e, decoder=d).to(device)\n\noptimizer = optim.Adam(model.parameters(), lr=args.lr)\nscheduler = None\n# if args.schedule:\n# scheduler = StepLR(optimizer, step_size=5000, gamma=0.1)\ntrain_model(model, optimizer, scheduler, num_epochs=150)" ]
[ [ "numpy.random.seed", "torch.nn.functional.l1_loss", "torch.manual_seed", "torch.utils.data.DataLoader", "matplotlib.pyplot.rc", "torch.nn.functional.mse_loss", "torch.cuda.is_available", "torch.save" ] ]
an-kramer/exo-intraday
[ "7359f387534063080cf415feee241b3eda22bf91", "7359f387534063080cf415feee241b3eda22bf91" ]
[ "model_full/parameter_estimation.py", "model_full/model_specs.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2020 by Anke Kramer\n\nLicensed under the MIT License as stated in the LICENSE file.\n\nThis code supplements the paper \"Exogenous factors for order arrivals \non the intraday electricity market\" by Anke Kramer and Rüdiger Kiesel.\n\"\"\"\n\n#%% update Python path\nimport sys\n\nsys.path.insert(0, '../functions')\n\n#%% imports\nfrom model_specs import Model_intraday as Model\nfrom model_specs_noPV import Model_intraday as Model_noPV\nfrom discretization import Discretization\nimport tools\nimport database as db\nimport optimization as optim\nimport warnings\nimport numpy as np\nfrom multiprocessing import Pool\n\n\n#%% script for executing algorithm\n\n####################### set parameters and load data ##########################\n\n# trading period and product\ndelivery_duration = 60\norder_type = 'market_order'\nob_side = 'buy'\n\n# load dates\ndates_filename = 'examples'\ntoEstimate = tools.load_dates(dates_filename + '_toEstimate.csv')\ndone = tools.load_dates(dates_filename + '_' + ob_side + '_done.csv')\ndelivery_starts = [item for item in toEstimate if item not in done]\n\n\n\nfor num, delivery_start in enumerate(delivery_starts):\n # data import and processing\n \n print('ESTIMATING DATE ' + str(num+1) + '/' + str(len(delivery_starts)) + '...')\n \n try:\n \n # event data\n event_data, start, end, time_horizon = db.get_event_data(delivery_start=delivery_start,\n delivery_duration=delivery_duration,\n order_type=order_type,\n ob_side=ob_side,\n minutes_to_closure=180)\n \n # completely observed data (to be interpolated)\n co_data = [db.get_actuals_series(period_start=start,\n trading_end=end,\n actuals_type='solar',\n minutes_lag=15,\n unit='GWh',\n compute_error=True),\n db.get_actuals_series(period_start=start,\n trading_end=end,\n actuals_type='wind',\n minutes_lag=15,\n unit='GWh',\n compute_error=True),\n db.get_imbalance_data(period_start=start,\n trading_end=end,\n minutes_lag=15,\n unit=\"GWh\")]\n\n \n # set names for building filenames\n model_title = 'FullModel'\n dataset = delivery_start.replace(':','').replace(' ','_')[:-2]\n \n # set flag for no PV (e.g. 
at night)\n if np.any(np.isnan(co_data[0][:,1])):\n no_pv = True\n co_data = co_data[1:]\n else:\n no_pv = False\n \n \n \n ##################### define model and discretization #########################\n # initialize model\n if no_pv:\n\n model = Model_noPV(event_data,\n co_data,\n time_horizon)\n else:\n\n model = Model(event_data,\n co_data,\n time_horizon)\n \n \n # initialize discretization\n disc = Discretization(model,\n time_horizon)\n \n \n \n ########################## do optimization ####################################\n #define method for optimization\n method = 'Nelder-Mead'\n \n # reuse parameter estimates from self-exciting model\n param_old = tools.load_obj('../model_selfexciting/results/' + ob_side + '/ParamEstimation_SeModel_' + dataset + '.pkl', as_is=True)\n param_old = param_old['x']\n \n # depending on pv flag, set different start parameters\n if no_pv:\n \n # define starting values of model parameters\n param_0 = [np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([10, 10]), # alpha2, alpha3\n param_old[1:])), #alpha4, beta\n np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([-0.4, -2]), # alpha2, alpha3\n param_old[1:])), #alpha4, beta\n np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([5, 5]), # alpha2, alpha3\n param_old[1:])), #alpha4 + beta\n np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([-0.5, -3]), # alpha2, alpha3\n param_old[1:]))] #alpha4, beta\n \n param_names = ['alpha0', 'alpha2', 'alpha3', 'alpha4', 'beta']\n\n \n else:\n \n # define starting values of model parameters\n param_0 = [np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([10, 10, 10]), # alpha1, alpha2, alpha3\n param_old[1:])), #alpha4, beta\n np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([2.5, -0.4, -2]), # alpha1, alpha2, alpha3\n param_old[1:])), #alpha4, beta\n np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([-1, 5, 5]), # alpha1, alpha2, alpha3\n param_old[1:])), #alpha4, beta\n np.concatenate((np.array([param_old[0]]), #alpha0\n np.array([-1, -0.5, -3]), # alpha1, alpha2, alpha3\n param_old[1:]))] #alpha4, beta\n \n param_names = ['alpha0', 'alpha1', 'alpha2', 'alpha3', 'alpha4', 'beta']\n \n \n # define function for optimizer (with only one vector as input)\n def estimate_param(x):\n return -disc.compute_likelihood(intensity_param=x[model.intensity_param_idx],\n co_param=np.array([x[model.co_param_idx]]))\n \n \n # start optimization\n def do_optimization_x0(param_0):\n return optim.do_optimization(objective_function=estimate_param,\n start_param=param_0,\n method='Nelder-Mead',\n callback=None,\n options={'maxfev': 1800},\n save=False,\n filename=ob_side + '/ParamEstimation_' + model_title + '_' + dataset,\n parameter_names=param_names)\n \n pool = Pool(4) \n res = pool.map(do_optimization_x0, param_0)\n pool.close()\n pool.join()\n \n # choose best estimate\n res = tools.choose_best_estimate(res, save=True,\n filename=ob_side + '/ParamEstimation_' + model_title + '_' + dataset + '_notConverged')\n \n # get estimation result\n estimated = res['x'].copy()\n \n \n # standard errors\n se = disc.compute_standard_errors(params=estimated)\n res = tools.save_se(res, se, param_names, save=True, filename=ob_side + '/ParamEstimation_' + model_title + '_' + dataset)\n \n \n # add date to list if estimation was successful\n tools.add_date(date=delivery_start, date_list=done,\n save=True, filename=dates_filename + '_' + ob_side + '_done.csv')\n \n \n except:\n\n warnings.warn(\"Exception raised in parameter 
estimation.\")\n ", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2020 by Anke Kramer\n\nLicensed under the MIT License as stated in the LICENSE file.\n\nThis code supplements the paper \"Exogenous factors for order arrivals \non the intraday electricity market\" by Anke Kramer and Rüdiger Kiesel.\n\"\"\"\n\n#%% imports\nimport numpy as np\nimport bisect as bs\nfrom model import Model\nfrom scipy.interpolate import interp1d\nimport math\n\n\n#%% class definition\n\nclass Model_intraday(Model):\n '''\n Class for point process model in Giesecke2018.\n '''\n\n def __init__(self, event_data, covariates, time_horizon):\n ''' Constructor of model\n event_data: data of point process (timestamps)\n covariates: exogenous data for completely observed factor, which need to\n be interpolated (all data except for point process data)\n time_horizon: time horizon for point process data\n '''\n\n super().__init__(event_data, covariates, 4)\n \n self.time_horizon = time_horizon\n \n # interpolation function for covariate data\n self.interp_covariates = [interp1d(covar[:,0], covar[:,1],\n fill_value=\"extrapolate\",\n kind=\"previous\")\n for covar in covariates]\n \n self.break_points = np.sort(np.unique(np.hstack([covar[:,0] for covar in covariates])))\n \n # indices for parameters\n self.intensity_param_idx = [0,1,2,3,4]\n self.co_param_idx = [5]\n \n # indices of intensity parameters with zero bound\n self.zero_bound_idx_intensity = [4]\n self.zero_bound_idx_co = [0]\n\n \n \n def intensity_deterministic(self, intensity_param, co_param, time):\n ''' Specify intensity function\n intensity_param: parameters for shape of intensity function\n co_param: parameters for completely observed factor\n time: current time\n '''\n\n # index fo Hawkes sum\n idx = bs.bisect_left(self.event_data[:,0],time)\n \n # intensity\n intensity = (np.exp(intensity_param[0] +\n intensity_param[1]*self.interp_covariates[0](time) +\n intensity_param[2]*self.interp_covariates[1](time) +\n intensity_param[3]*self.interp_covariates[2](time)) +\n intensity_param[4]*math.fsum(np.exp(-co_param[0]*(time-self.event_data[:idx,0]))))\n\n return intensity\n \n \n \n def loglik_integral(self, intensity_param, co_param, time_horizon):\n ''' Calculate the integral part of the log-likelihood function at\n least partly analytically.\n '''\n \n def intensity_factors(time):\n return np.exp(intensity_param[0] +\n intensity_param[1]*self.interp_covariates[0](time) +\n intensity_param[2]*self.interp_covariates[1](time) +\n intensity_param[3]*self.interp_covariates[2](time))\n \n diff_bp = np.diff(self.break_points)\n val = intensity_factors(self.break_points[:-1])\n out = np.sum(diff_bp*val)\n \n out -= time_horizon\n \n out += (intensity_param[4]/co_param[0])*math.fsum(1-np.exp(-co_param[0]*(time_horizon-self.event_data[:,0])))\n\n return out\n" ]
[ [ "numpy.isnan", "numpy.array" ], [ "numpy.hstack", "scipy.interpolate.interp1d", "numpy.diff", "numpy.exp", "numpy.sum" ] ]
SarahMit/bullseye_pipeline
[ "4e538793037b9b15a603d1644253b49c7c57f0a0" ]
[ "bullseye_pipeline/utils.py" ]
[ "from nipype.interfaces.base import (\n traits,\n TraitedSpec,\n CommandLineInputSpec,\n CommandLine,\n File\n)\n\nimport os\n\ndef filter_labels(in_file, include_superlist, fixed_id=None, map_pairs_list=None):\n \"\"\"filters-out labels not in the include-superset. Merges labels within superset. Transforms label-ids according to mappings (or fixed id)\"\"\"\n import nibabel as nib\n import numpy as np\n import os\n\n # read label file and create output\n in_nib = nib.load(in_file)\n in0 = in_nib.get_data()\n out0 = np.zeros(in0.shape, dtype=in0.dtype)\n\n # for each group of labels in subset assign them the same id (either 1st in subset or fixed-id, in case given)\n for labels_list in include_superlist:\n for label in labels_list:\n value = labels_list[0]\n if fixed_id is not None: value = fixed_id[0]\n out0[in0 == label] = value\n\n # transform label-ids in case mapping is specified\n if map_pairs_list is not None:\n out1 = np.copy(out0)\n for map_pair in map_pairs_list:\n out1[out0 == map_pair[0]] = map_pair[1]\n\n # save output\n out_final = out0 if not map_pairs_list else out1\n out_nib = nib.Nifti1Image(out_final, in_nib.affine, in_nib.header)\n nib.save(out_nib, 'filtered.nii.gz')\n\n return os.path.abspath('filtered.nii.gz')\n\n\ndef norm_dist_map(orig_file, dest_file):\n \"\"\"compute normalized distance map given an origin and destination masks, resp.\"\"\"\n import os\n import nibabel as nib\n import numpy as np\n from scipy.ndimage.morphology import distance_transform_edt\n\n orig_nib = nib.load(orig_file)\n dest_nib = nib.load(dest_file)\n\n orig = orig_nib.get_data()\n dest = dest_nib.get_data()\n\n dist_orig = distance_transform_edt(np.logical_not(orig.astype(np.bool)))\n dist_dest = distance_transform_edt(np.logical_not(dest.astype(np.bool)))\n\n # normalized distance (0 in origin to 1 in dest)\n ndist = dist_orig / (dist_orig + dist_dest)\n\n ndist_nib = nib.Nifti1Image(ndist.astype(np.float32), orig_nib.affine)\n nib.save(ndist_nib, 'ndist.nii.gz')\n\n return os.path.abspath('ndist.nii.gz')\n\ndef create_shells(ndist_file, n_shells=4, out_file = 'shells.nii.gz', mask_file=None):\n \"\"\"creates specified number of shells given normalized distance map. 
When mask is given, output in mask == 0 is set to zero\"\"\"\n import os\n import nibabel as nib\n import numpy as np\n\n ndist_nib = nib.load(ndist_file)\n ndist = ndist_nib.get_data()\n\n # if mask is provided, use it to mask-out regions outside it\n if mask_file is not None:\n mask_nib = nib.load(mask_file)\n assert mask_nib.header.get_data_shape() == ndist_nib.header.get_data_shape(), \"Different shapes of images\"\n mask = mask_nib.get_data() > 0\n\n out = np.zeros(ndist.shape, dtype=np.int8)\n\n limits = np.linspace(0., 1., n_shells+1)\n for i in np.arange(n_shells)+1:\n # compute shell and assing increasing label-id\n mask2 = np.logical_and(ndist >= limits[i-1], ndist < limits[i])\n if mask_file is not None: # maskout regions outside mask\n mask2 = np.logical_and(mask2, mask)\n out[mask2] = i\n out[np.isclose(ndist, 0.)] = 0 # need to assign zero to ventricles because of >= above\n\n aux_hdr = ndist_nib.header\n aux_hdr.set_data_dtype(np.int8)\n\n out_nib = nib.Nifti1Image(out, ndist_nib.affine, aux_hdr)\n nib.save(out_nib, out_file)\n\n return os.path.abspath(out_file)\n\n\ndef merge_labels(in1_file, in2_file, out_file='merged.nii.gz', intersect=False):\n \"\"\"merges labels from two input labelmaps, optionally computing intersection\"\"\"\n import os\n import nibabel as nib\n import numpy as np\n\n in1_nib = nib.load(in1_file)\n in2_nib = nib.load(in2_file)\n\n assert in1_nib.header.get_data_shape() == in2_nib.header.get_data_shape(), \"Different shapes of images\"\n\n in1 = in1_nib.get_data()\n in2 = in2_nib.get_data()\n\n out = None\n\n # if not intersection, simply include labels from 'in2' into 'in1'\n if not intersect:\n\n out = np.zeros(in1.shape, dtype=np.int8)\n\n out[:] = in1[:]\n mask = in2 > 0\n out[mask] = in2[mask] # overwrite in1 where in2 > 0\n\n\n aux_hdr = in1_nib.header\n aux_hdr.set_data_dtype(np.int8)\n\n # if intersection, create new label-set as cartesian product of the two sets\n else:\n\n out = np.zeros(in1.shape, dtype=np.int32)\n\n u1_set = np.unique(in1.ravel())\n u2_set = np.unique(in2.ravel())\n\n for u1 in u1_set:\n if u1 == 0: continue\n mask1 = in1 == u1\n for u2 in u2_set:\n if u2 == 0: continue\n mask2 = in2 == u2\n mask3 = np.logical_and(mask1, mask2)\n if not np.any(mask3): continue\n out[mask3] = int(str(u1) + str(u2)) # new label id by concatenating [u1, u2]\n\n aux_hdr = in1_nib.header\n aux_hdr.set_data_dtype(np.int32)\n\n out_nib = nib.Nifti1Image(out, in1_nib.affine, aux_hdr)\n nib.save(out_nib, out_file)\n\n return os.path.abspath(out_file)\n\n\ndef generate_wmparc(incl_file, ndist_file, label_file, incl_labels=None, verbose=False):\n \"\"\"generates wmparc by propagating labels in 'label_file' down the gradient defined by distance map in 'ndist_file'.\n Labels are only propagated in regions where 'incl_file' > 0 (or 'incl_file' == incl_labels[i], if 'incl_labels is provided).\n \"\"\"\n import os\n import nibabel as nib\n import numpy as np\n from scipy.ndimage.morphology import binary_dilation, generate_binary_structure, iterate_structure\n\n connectivity = generate_binary_structure(3, 2)\n\n # read images\n incl_nib = nib.load(incl_file)\n ndist_nib = nib.load(ndist_file)\n label_nib = nib.load(label_file)\n\n assert incl_nib.header.get_data_shape() == ndist_nib.header.get_data_shape() and \\\n incl_nib.header.get_data_shape() == label_nib.header.get_data_shape(), \"Different shapes of mask, ndist and label images\"\n\n # create inclusion mask\n incl_mask = None\n incl_aux = incl_nib.get_data()\n if incl_labels is None:\n incl_mask = 
incl_aux > 0\n else:\n incl_mask = np.zeros(incl_nib.header.get_data_shape(), dtype=np.bool)\n for lab in incl_labels:\n incl_mask[incl_aux == lab] = True\n\n # get rest of numpy arrays\n ndist = ndist_nib.get_data()\n label = label_nib.get_data()\n\n # get DONE and processing masks\n DONE_mask = label > 0 # this is for using freesurfer wmparc\n proc_mask = np.logical_and(np.logical_and(ndist > 0., ndist < 1.), incl_mask)\n\n # setup the ouptut vol\n out = np.zeros(label.shape, dtype=label.dtype)\n\n # initialize labels in cortex\n out[DONE_mask] = label[DONE_mask] # this is for using freesurfer wmparc\n\n # start with connectivity 1\n its_conn = 1\n\n # main loop\n while not np.all(DONE_mask[proc_mask]):\n\n if verbose:\n print('%0.1f done' % (100. * float(DONE_mask[proc_mask].sum()) / float(proc_mask.sum())))\n\n # loop to increase connectivity for non-reachable TO-DO points\n while True:\n\n # dilate the SOLVED area\n aux = binary_dilation(DONE_mask, iterate_structure(connectivity, its_conn))\n # next TO-DO: close to DONE, in the processing mask and not yet done\n TODO_mask = np.logical_and(np.logical_and(aux, proc_mask), np.logical_not(DONE_mask))\n\n if TODO_mask.sum() > 0:\n break\n\n if verbose:\n print('Non-reachable points. Increasing connectivity')\n\n its_conn += 1\n\n # sort TO-DO points by ndist\n Idx_TODO = np.argwhere(TODO_mask)\n Idx_ravel = np.ravel_multi_index(Idx_TODO.T, label.shape)\n I_sort = np.argsort(ndist.ravel()[Idx_ravel])\n\n # iterate along TO-DO points\n for idx in Idx_TODO[I_sort[::-1]]:\n\n max_dist = -1.\n\n # process each neighbor\n for off in np.argwhere(iterate_structure(connectivity, its_conn)) - its_conn:\n\n try:\n\n # if it is not DONE then skip\n if not DONE_mask[idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]]:\n continue\n\n # if it is the largest distance (ie, largest gradient)\n cur_dist = ndist[idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]]\n if cur_dist > max_dist:\n out[idx[0], idx[1], idx[2]] = out[idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]]\n max_dist = cur_dist\n\n except:\n print('something wrong with neighbor at: (%d, %d, %d)' % (\n idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]))\n pass\n\n if max_dist < 0.: print(\"something went wrong with point: (%d, %d, %d)\" % (idx[0], idx[1], idx[2]))\n\n # mark as solved and remove from visited\n DONE_mask[idx[0], idx[1], idx[2]] = True\n\n # # remove labels from cortex (old aparc version)\n # out[dest_mask] = 0\n\n print('Writing output labelmap')\n out_nib = nib.Nifti1Image(out, label_nib.affine, label_nib.header)\n nib.save(out_nib, 'wmparc.nii.gz')\n\n return os.path.abspath('wmparc.nii.gz')\n\n\nclass Annot2LabelInputSpec(CommandLineInputSpec):\n subject = traits.String(desc='subject id', argstr='--subject %s', position=0, mandatory=True)\n hemi = traits.Enum(\"rh\", \"lh\", desc=\"hemisphere [rh | lh]\", position=1, argstr=\"--hemi %s\", mandatory=True)\n lobes = traits.Enum(\"lobes\", desc='lobes type', argstr='--lobesStrict %s', position=2)\n in_annot = traits.File(desc='input annotation file', exists=True)\n\nclass Annot2LabelOutputSpec(TraitedSpec):\n out_annot_file = File(desc = \"lobes annotation file\", exists = True)\n\nclass Annot2Label(CommandLine):\n \"\"\"wrapper for FreeSurfer command-line tool 'mri_annotation2label'\"\"\"\n input_spec = Annot2LabelInputSpec\n output_spec = Annot2LabelOutputSpec\n _cmd = os.path.join(os.environ['FREESURFER_HOME'], 'bin', 'mri_annotation2label')\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n 
outputs['out_annot_file'] = os.path.join(os.path.dirname(self.inputs.in_annot), self.inputs.hemi + \".lobes.annot\")\n return outputs\n\n def _format_arg(self, name, spec, value):\n if(name=='subject'):\n # take only the last part of the subject path\n return spec.argstr % ( os.path.basename(os.path.normpath(self.inputs.subject)))\n\n return super(Annot2Label, self)._format_arg(name, spec, value)\n\n\nclass Aparc2AsegInputSpec(CommandLineInputSpec):\n subject = traits.String(desc='subject id', argstr='--s %s', position=0, mandatory=True)\n annot = traits.String(desc='name of annot file', argstr='--annot %s', position=1, mandatory=True)\n labelwm = traits.Bool(desc='percolate white matter', argstr='--labelwm', position=2)\n dmax = traits.Int(desc='depth to percolate', argstr='--wmparc-dmax %d', position=3)\n rip = traits.Bool(desc='rip unknown label', argstr='--rip-unknown', position=4)\n hypo = traits.Bool(desc='hypointensities as wm', argstr='--hypo-as-wm', position=5)\n out_file = traits.File(desc='output aseg file', argstr='--o %s', position=6)\n in_lobes_rh = traits.File(desc='input lobar file RH', exists=True)\n in_lobes_lh = traits.File(desc='input lobar file LH', exists=True)\n\nclass Aparc2AsegOutputSpec(TraitedSpec):\n out_file = File(desc = \"lobes aseg file\", exists = True)\n\nclass Aparc2Aseg(CommandLine):\n \"\"\"wrapper for FreeSurfer command-line tool 'mri_aparc2aseg'\"\"\"\n input_spec = Aparc2AsegInputSpec\n output_spec = Aparc2AsegOutputSpec\n\n _cmd = os.path.join(os.environ['FREESURFER_HOME'], 'bin', 'mri_aparc2aseg')\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = os.path.abspath(self.inputs.out_file)\n return outputs\n\n def _format_arg(self, name, spec, value):\n if(name=='subject'):\n # take only the last part of the subject path\n return spec.argstr % ( os.path.basename(os.path.normpath(self.inputs.subject)))\n\n return super(Aparc2Aseg, self)._format_arg(name, spec, value)\n\n\n" ]
[ [ "numpy.logical_not", "numpy.linspace", "numpy.arange", "numpy.argwhere", "numpy.all", "numpy.copy", "numpy.any", "scipy.ndimage.morphology.generate_binary_structure", "numpy.ravel_multi_index", "numpy.logical_and", "numpy.zeros", "scipy.ndimage.morphology.iterate_structure", "numpy.isclose" ] ]
ngoyal2707/tutel
[ "677cdd077293968852f41617e9f2a274a233a258" ]
[ "tutel/impls/moe_layer.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nfrom typing import TYPE_CHECKING, Any, Optional, Tuple, Union, cast\n\nimport torch\nfrom torch import Tensor\nimport torch.distributed as dist\nfrom torch.nn import ModuleList\nimport torch.nn.functional as F\n\nfrom ..impls.fast_dispatch import fast_dispatcher\nfrom ..jit_kernels.gating import fast_cumsum_sub_one\n\n\ndef get_world_size(group):\n try:\n return dist.get_world_size(group)\n except:\n return 1\n\ndef get_world_rank(group):\n try:\n return dist.get_rank(group)\n except:\n return 0\n\ndef one_hot_with_dtype(data, num_classes, dtype):\n result = torch.zeros([data.size(0), num_classes], device=data.device, dtype=dtype)\n result.scatter_(1, data.unsqueeze(-1), 1)\n return result\n\n\nclass AllToAll(torch.autograd.Function):\n @staticmethod\n def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor):\n ctx.group = group\n ctx.world_size = get_world_size(group)\n if ctx.world_size <= 1 or AllToAll.skip_a2a:\n return input\n input = input.contiguous()\n output = torch.empty_like(input)\n dist.all_to_all_single(output, input, group=group)\n return output\n\n @staticmethod\n def backward(ctx: Any, grad_output: Tensor):\n if ctx.world_size <= 1 or AllToAll.skip_a2a:\n return (None, grad_output)\n return (None, AllToAll.apply(ctx.group, grad_output))\n\n\ndef load_balance(gates, mask1, num_global_experts, use_fp32):\n if gates.dtype == torch.float32 or use_fp32:\n me = torch.sum(gates.float(), dim=0)\n ce = torch.sum(mask1.to(me.dtype), dim=0)\n l_loss = torch.sum(me * ce) * (num_global_experts / (gates.size(0) * gates.size(0)))\n else:\n me = torch.mean(gates, dim=0)\n ce = torch.mean(mask1.to(gates.dtype), dim=0)\n l_loss = torch.sum(me * ce) * num_global_experts\n return l_loss\n\n\nclass Top1Gate(torch.nn.Module):\n\n def __init__(\n self,\n model_dim,\n num_global_experts,\n capacity_factor=1.0,\n use_fp32=False,\n ):\n super().__init__()\n self.wg = torch.nn.Linear(model_dim, num_global_experts, bias=False)\n self.capacity_factor = capacity_factor\n self.use_fp32 = use_fp32\n self.num_global_experts = num_global_experts\n\n def capacity(self, expected_sample_size):\n if not hasattr(self, 'capacity_int'):\n self.capacity_int = int(self.capacity_factor * ((expected_sample_size + self.num_global_experts - 1) // self.num_global_experts))\n return self.capacity_int\n\n def forward(self, input: torch.Tensor):\n logits = self.wg(input)\n\n indices1_s = torch.argmax(logits, dim=1)\n mask1 = one_hot_with_dtype(indices1_s, num_classes=self.num_global_experts, dtype=indices1_s.dtype)\n\n mask1_ = mask1.to(logits.dtype)\n gates = F.softmax(logits, dim=1)\n gates1_s = (gates * mask1_).sum(dim=1)\n l_loss = load_balance(gates, mask1_, self.num_global_experts, self.use_fp32)\n\n locations1 = fast_cumsum_sub_one(mask1)\n locations1_s = torch.sum(locations1 * mask1, dim=1).to(torch.int32)\n\n return l_loss, [gates1_s, ], [indices1_s.to(torch.int32), ], [locations1_s.to(torch.int32), ]\n\n\nclass Top2Gate(torch.nn.Module):\n \n def __init__(\n self,\n model_dim,\n num_global_experts,\n capacity_factor=1.0,\n use_fp32=False,\n ):\n super().__init__()\n self.wg = torch.nn.Linear(model_dim, num_global_experts, bias=False)\n self.capacity_factor = capacity_factor\n self.use_fp32 = use_fp32\n self.num_global_experts = num_global_experts\n assert self.num_global_experts >= 2, \"You have only 1 expert, while you are using a top-2 gate.\"\n\n def capacity(self, expected_sample_size):\n if not hasattr(self, 
'capacity_int'):\n self.capacity_int = 2 * int(self.capacity_factor * ((expected_sample_size + self.num_global_experts - 1) // self.num_global_experts))\n return self.capacity_int\n\n def forward(self, input: torch.Tensor):\n logits = self.wg(input)\n\n top2_indices = torch.topk(logits, 2, dim=1).indices\n indices1_s, indices2_s = top2_indices.chunk(2, dim=1)\n indices1_s, indices2_s = indices1_s.view(-1), indices2_s.view(-1)\n\n mask1 = one_hot_with_dtype(indices1_s, num_classes=self.num_global_experts, dtype=indices1_s.dtype)\n mask2 = one_hot_with_dtype(indices2_s, num_classes=self.num_global_experts, dtype=indices2_s.dtype)\n\n gates = F.softmax(logits, dim=1)\n gates1_s = (gates * mask1).sum(dim=1)\n gates2_s = (gates * mask2).sum(dim=1)\n l_loss = load_balance(gates, mask1, self.num_global_experts, self.use_fp32)\n\n locations1 = fast_cumsum_sub_one(mask1)\n locations1_s = torch.sum(locations1 * mask1, dim=1).to(torch.int32)\n\n locations2 = fast_cumsum_sub_one(mask2)\n locations2 += torch.sum(mask1, dim=0, keepdim=True)\n locations2_s = torch.sum(locations2 * mask2, dim=1)\n\n # Normalize Gate\n denom_s = torch.clamp(gates1_s + gates2_s, min=torch.finfo(gates2_s.dtype).eps)\n gates1_s /= denom_s\n gates2_s /= denom_s\n\n return l_loss, [gates1_s, gates2_s], [indices1_s.to(torch.int32), indices2_s.to(torch.int32)], [locations1_s.to(torch.int32), locations2_s.to(torch.int32)]\n\n\nclass MOELayer(torch.nn.Module):\n \"\"\"Tutel optimized MOELayer\n\n e.g.\n\n moe_layer = MOELayer('Top2Gate', model_dim, experts={'type': 'ffn', 'hidden_size_per_expert': 1024})\n y = moe_layer(x)\n\n Args:\n gate : the string type of MOE gate, e.g: Top1Gate, Top2Gate\n model_dim : the number of channels for MOE's input tensor\n experts : a dict-type config for builtin expert network, or a torch.nn.Module-type custom expert network\n fp32_gate : option of enabling mixed precision for gate network\n scan_expert_func : allow users to specify a lambda function to iterate each experts param, e.g. `scan_expert_func = lambda name, param: setattr(param, 'expert', True)`\n result_func : allow users to specify a lambda function to format the MoE output and aux_loss, e.g. `result_func = lambda output: (output, output.l_aux)`\n group : specify the explicit communication group of all_to_all\n seeds : a tuple containing a pair of int to specify manual seed of (shared params, local params)\n \"\"\"\n\n def __init__(self, gate_type, model_dim: int, experts = None, fp32_gate = False, scan_expert_func = None, result_func = None, group: Optional[Any] = None, seeds = None):\n super().__init__()\n\n assert model_dim % 2 == 0, \"Model_dim (%s) must be even value, while this Model_dim mod 2 > 0.\" % model_dim\n self.expert_group = group = group if group is not None else dist.group.WORLD\n self.world_size = get_world_size(self.expert_group)\n self.result_func = result_func\n\n import os\n self.skip_moe = (int(os.environ.get('SKIP_MOE', '0')) != 0)\n AllToAll.skip_a2a = (int(os.environ.get('SKIP_A2A', '0')) != 0)\n\n if not isinstance(experts, dict):\n self.experts = cast(ModuleList, experts) if type(experts) == ModuleList else ModuleList(experts)\n self.num_local_experts = len(self.experts)\n else:\n network_type = experts['type']\n if network_type == 'ffn':\n ''' << Fused FFN Experts V1 >> (kernels = 5)\n\n hidden[W, E, C, V] +=! input[W, E, C, M] x expert_fc1[0, E, M, V]\n hidden[W, E, C, V] = hidden[W, E, C, V] + bias_fc1[E, V]\n hidden[W, E, C, V] = activation_fn(hidden[W, E, C, V])\n hidden[W, E, C, M] +=! 
hidden[W, E, C, V] x expert_fc2[0, E, V, M]\n output[W, E, C, M] = hidden[W, E, C, M] + bias_fc2[E, M]\n\n << Fused FFN Experts V2 >> (kernels = 7)\n\n hidden[E, W, C, M] = input[W, E, C, M]\n hidden[E, W, C, V] +=! hidden[E, W, C, M] x expert_fc1[0, E, M, V]\n hidden[E, W, C, V] = hidden[E, W, C, V] + bias_fc1[E, V]\n hidden[E, W, C, V] = activation_fn(hidden[E, W, C, V])\n hidden[E, W, C, M] +=! hidden[E, W, C, V] x expert_fc2[0, E, V, M]\n hidden[E, W, C, M] = hidden[E, W, C, M] + bias_fc2[E, M]\n output[W, E, C, M] = hidden[E, W, C, M]\n '''\n\n self.num_local_experts = experts.get('count_per_node', 1)\n fused_custom_fn = experts.get('fused_custom_fn')\n if fused_custom_fn is None:\n activation_fn = experts.get('activation_fn', lambda x: F.relu(x))\n\n class FusedExpertsNetwork(torch.nn.Module):\n def __init__(self, model_dim, hidden_size, local_experts):\n super().__init__()\n self.skip_expert = (int(os.environ.get('SKIP_EXPERT', '0')) != 0)\n\n fc1_weight = torch.empty(1, local_experts, model_dim, hidden_size)\n fc2_weight = torch.empty(1, local_experts, hidden_size, model_dim)\n fc1_bias = torch.empty(1, local_experts, 1, hidden_size)\n fc2_bias = torch.empty(1, local_experts, 1, model_dim)\n\n for i in range(local_experts):\n fc1 = torch.nn.Linear(model_dim, hidden_size)\n fc2 = torch.nn.Linear(hidden_size, model_dim)\n fc1_weight[0, i, :, :], fc1_bias[0, i, :, :] = fc1.weight.t(), fc1.bias\n fc2_weight[0, i, :, :], fc2_bias[0, i, :, :] = fc2.weight.t(), fc2.bias\n\n self.model_dim, self.hidden_size, self.local_experts = model_dim, hidden_size, local_experts\n\n if self.local_experts == 1:\n fc1_weight = fc1_weight.view(self.model_dim, self.hidden_size)\n fc2_weight = fc2_weight.view(self.hidden_size, self.model_dim)\n fc1_bias = fc1_bias.view(-1, self.hidden_size)\n fc2_bias = fc2_bias.view(-1, self.model_dim)\n else:\n fc1_weight = fc1_weight.view(self.local_experts, self.model_dim, self.hidden_size)\n fc2_weight = fc2_weight.view(self.local_experts, self.hidden_size, self.model_dim)\n fc1_bias = fc1_bias.view(self.local_experts, 1, self.hidden_size)\n fc2_bias = fc2_bias.view(self.local_experts, 1, self.model_dim)\n\n self.register_parameter(name='fc1_weight', param=torch.nn.Parameter(fc1_weight))\n self.register_parameter(name='fc2_weight', param=torch.nn.Parameter(fc2_weight))\n self.register_parameter(name='fc1_bias', param=torch.nn.Parameter(fc1_bias))\n self.register_parameter(name='fc2_bias', param=torch.nn.Parameter(fc2_bias))\n\n def extra_repr(self):\n return 'model_dim=%d, hidden_size=%d, local_experts=%d' % (self.model_dim, self.hidden_size, self.local_experts)\n\n def forward(self, x):\n if self.skip_expert:\n return x\n if fused_custom_fn is not None:\n x = fused_custom_fn(self, x)\n elif self.local_experts == 1:\n original_shape, x = x.shape, x.view(-1, self.model_dim)\n x = torch.addmm(self.fc1_bias, x, self.fc1_weight)\n x = activation_fn(x)\n x = torch.addmm(self.fc2_bias, x, self.fc2_weight)\n x = x.view(original_shape)\n else:\n x = x.permute(1, 0, 2, 3)\n original_shape, x = x.shape, x.reshape(self.local_experts, -1, self.model_dim)\n x = torch.matmul(x, self.fc1_weight) + self.fc1_bias\n x = activation_fn(x)\n x = torch.matmul(x, self.fc2_weight) + self.fc2_bias\n x = x.reshape(self.local_experts, original_shape[1], original_shape[2], self.model_dim)\n x = x.permute(1, 0, 2, 3)\n return x\n\n def to(self, *args, **kwargs):\n self = super().to(*args, **kwargs)\n self.fc1_weight = self.fc1_weight.to(*args, **kwargs)\n self.fc2_weight = 
self.fc2_weight.to(*args, **kwargs)\n self.fc1_bias = self.fc1_bias.to(*args, **kwargs)\n self.fc2_bias = self.fc2_bias.to(*args, **kwargs)\n return self\n\n if seeds is not None:\n torch.manual_seed(seeds[1])\n self.experts = ModuleList([FusedExpertsNetwork(model_dim, experts['hidden_size_per_expert'], self.num_local_experts)])\n else:\n raise Exception('Builtin expert type is not recognized: %s' % network_type)\n\n if scan_expert_func is not None:\n for expert in self.experts:\n for n, p in expert.named_parameters():\n scan_expert_func(n, p)\n\n self.num_global_experts = self.world_size * self.num_local_experts\n self.model_dim = model_dim\n\n if gate_type == 'Top1Gate' or (gate_type == 'Top2Gate' and self.num_global_experts == 1):\n gating = Top1Gate\n elif gate_type == 'Top2Gate':\n gating = Top2Gate\n else:\n raise Exception(\"Unrecognized gate_type: %s\" % gate_type)\n\n if seeds is not None:\n torch.manual_seed(seeds[0])\n self.gate = gating(model_dim=model_dim, num_global_experts=self.num_global_experts, use_fp32=fp32_gate)\n\n def get_parameter_iterator(self, param_type):\n if param_type == 'gate':\n return self.gate.named_parameters()\n elif param_type == 'local_experts':\n return self.experts.named_parameters()\n else:\n raise Exception(\"Specified parameter type is not recognized: %s. Valid `param_type` includes: gate, local_experts.\" % param_type)\n\n def forward(self, input: Tensor, **kwargs: Any):\n if self.skip_moe:\n result_output = input\n result_output.l_aux = None\n return self.result_func(result_output) if self.result_func is not None else result_output\n\n original_shape, original_dtype = input.shape, input.dtype\n assert len(input.shape) >= 2, \"Input data must be at least 2D tensor: (s)amples, .., (m)odel_dim\"\n reshaped_input = input.reshape(-1, input.shape[-1])\n reshaped_input_samples = reshaped_input.shape[0]\n\n self.expected_sample_size = getattr(self, 'expected_sample_size', 0) or reshaped_input.size(0)\n if reshaped_input.size(0) != self.expected_sample_size:\n if reshaped_input.size(0) > self.expected_sample_size:\n raise Exception('MoE JIT is designed to work on sample size = %s, while receiving sample size = %s (> %s)' % (self.expected_sample_size, reshaped_input.size(0), self.expected_sample_size))\n else:\n print('MoE is initialized to keep working on sample size = %s, while receiving sample size = %s (will slow down this forward step)' % (self.expected_sample_size, reshaped_input.size(0)))\n pad_input = torch.zeros([self.expected_sample_size, self.model_dim], dtype=reshaped_input.dtype, layout=reshaped_input.layout, device=reshaped_input.device)\n pad_input[:reshaped_input.size(0)] = reshaped_input\n reshaped_input = pad_input\n\n if not hasattr(self, 'param_dtype'):\n self.param_dtype = next(iter(self.experts.parameters())).dtype\n self.capacity = self.gate.capacity(self.expected_sample_size)\n\n reshaped_input = reshaped_input.to(self.param_dtype)\n l_aux, gates_, indices_, locations_ = self.gate(reshaped_input)\n\n if not hasattr(self, '_tutel_dispatcher'):\n self._tutel_dispatcher = fast_dispatcher(num_global_experts=self.num_global_experts, capacity=self.capacity, model_dim=self.model_dim, dispatch_dtype=reshaped_input.dtype)\n\n self._tutel_dispatcher.update(indices_, locations_, gates_)\n\n S, M, GE, C = self.expected_sample_size, self.model_dim, self.num_global_experts, self.capacity\n\n dispatched_input = self._tutel_dispatcher.encode(reshaped_input)\n dispatched_input = AllToAll.apply(self.expert_group, dispatched_input)\n \n 
dispatched_input = dispatched_input.reshape(self.world_size, self.num_local_experts, -1, M)\n\n if len(self.experts) == 1:\n expert_output = self.experts[0](dispatched_input)\n else:\n chunks = dispatched_input.chunk(self.num_local_experts, dim=1)\n expert_outputs = [expert(chunk) for chunk, expert in zip(chunks, self.experts)]\n expert_output = torch.cat(expert_outputs, dim=1)\n\n expert_output = AllToAll.apply(self.expert_group, expert_output)\n\n expert_output = expert_output.reshape(self.world_size * self.num_local_experts, -1, M)\n\n result_output = self._tutel_dispatcher.decode(expert_output.view(GE * C, M))\n \n result_output = result_output[:reshaped_input_samples, :]\n result_output = result_output.view(original_shape).to(original_dtype)\n self.l_aux = result_output.l_aux = l_aux\n return self.result_func(result_output) if self.result_func is not None else result_output\n\nmoe_layer = MOELayer\n" ]
[ [ "torch.mean", "torch.nn.functional.softmax", "torch.cat", "torch.zeros", "torch.sum", "torch.finfo", "torch.topk", "torch.addmm", "torch.distributed.get_rank", "torch.nn.functional.relu", "torch.empty_like", "torch.nn.Parameter", "torch.empty", "torch.nn.ModuleList", "torch.nn.Linear", "torch.distributed.get_world_size", "torch.distributed.all_to_all_single", "torch.manual_seed", "torch.matmul", "torch.argmax" ] ]
as641651/LinearAlgera-Awareness-Benchmark
[ "1fbbca4229d5869f35a20193c6f23414fbc81ab7" ]
[ "Exp4-Algebraic-Manipulation/distributivity/distributivity_eq9_tf.py" ]
[ "import tensorflow as tf\nimport os\nimport time\nimport numpy as np\n\nclass bcolors:\n WARNING = '\\033[93m'\n ENDC = '\\033[0m'\n\n\n#Check if MKL is enabled\nimport tensorflow.python.framework as tff\nprint(bcolors.WARNING + \"MKL Enabled : \", tff.test_util.IsMklEnabled(), bcolors.ENDC)\n\n\n#Set threads\ntf.config.threading.set_inter_op_parallelism_threads(1)\ntf.config.threading.set_intra_op_parallelism_threads(1)\ntf.config.run_functions_eagerly(False)\n\n#Problem size\nn = 3000\nreps = 10\nDTYPE = tf.float32\n\n\[email protected]\ndef lhs(A,B,C):\n ret = A@B + A@C\n return ret\n\[email protected]\ndef rhs(A,B,C):\n ret = A@(B+C)\n return ret\n\nA = tf.random.normal([n, n], dtype=DTYPE)\nB = tf.random.normal([n, n], dtype=DTYPE)\nC = tf.random.normal([n, n], dtype=DTYPE)\n\n\nfor i in range(reps):\n start = time.perf_counter()\n ret1 = lhs(A,B,C)\n end = time.perf_counter()\n print(\"LHS : \", end-start) \n\n start = time.perf_counter()\n ret2 = rhs(A,B,C)\n end = time.perf_counter()\n print(\"RHS : \", end-start) \n \n tf.print(\"\\n\")\n\n" ]
[ [ "tensorflow.print", "tensorflow.config.threading.set_inter_op_parallelism_threads", "tensorflow.python.framework.test_util.IsMklEnabled", "tensorflow.random.normal", "tensorflow.config.threading.set_intra_op_parallelism_threads", "tensorflow.config.run_functions_eagerly" ] ]
Yamahitsuji/ssd_keras
[ "67bdf4c612a32e36a918490b821e4b63c12687f2" ]
[ "PASCAL_VOC/get_data_from_XML.py" ]
[ "import numpy as np\nimport os\nfrom xml.etree import ElementTree\n\nclass XML_preprocessor(object):\n\n def __init__(self, data_path):\n self.path_prefix = data_path\n self.num_classes = 1\n self.data = dict()\n self._preprocess_XML()\n\n def _preprocess_XML(self):\n filenames = os.listdir(self.path_prefix)\n for filename in filenames:\n tree = ElementTree.parse(self.path_prefix + filename)\n root = tree.getroot()\n bounding_boxes = []\n one_hot_classes = []\n size_tree = root.find('size')\n width = float(size_tree.find('width').text)\n height = float(size_tree.find('height').text)\n expected_obj = ['car']\n for object_tree in root.findall('object'):\n class_name = object_tree.find('name').text\n if class_name not in expected_obj:\n continue\n for bounding_box in object_tree.iter('bndbox'):\n xmin = float(bounding_box.find('xmin').text)/width\n ymin = float(bounding_box.find('ymin').text)/height\n xmax = float(bounding_box.find('xmax').text)/width\n ymax = float(bounding_box.find('ymax').text)/height\n bounding_box = [xmin,ymin,xmax,ymax]\n bounding_boxes.append(bounding_box)\n one_hot_class = self._to_one_hot(class_name)\n one_hot_classes.append(one_hot_class)\n if len(bounding_boxes) == 0:\n continue\n image_name = root.find('filename').text\n bounding_boxes = np.asarray(bounding_boxes)\n one_hot_classes = np.asarray(one_hot_classes)\n image_data = np.hstack((bounding_boxes, one_hot_classes))\n self.data[image_name] = image_data\n\n def _to_one_hot(self, name):\n one_hot_vector = [0] * self.num_classes\n if name == 'car':\n one_hot_vector[0] = 1\n else:\n print('unknown label: %s' %name)\n\n return one_hot_vector\n\n# example on how to use it\nimport pickle\ndata = XML_preprocessor('data/Annotations/').data\npickle.dump(data, open('supervised_data.pkl', 'wb'))\n" ]
[ [ "numpy.asarray", "numpy.hstack" ] ]
StarsMyDestination/OpenPCDet
[ "a9bfdffb2c23f6fe7d4c19085b47ec35728d5884" ]
[ "pcdet/models/backbones_2d/salsanext_backbone.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ResContextBlock(nn.Module):\n def __init__(self, in_filters, out_filters):\n super(ResContextBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_filters, out_filters, kernel_size=(1, 1), stride=1)\n self.act1 = nn.LeakyReLU()\n\n self.conv2 = nn.Conv2d(out_filters, out_filters, (3, 3), padding=1)\n self.act2 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm2d(out_filters)\n\n self.conv3 = nn.Conv2d(out_filters, out_filters, (3, 3), dilation=2, padding=2)\n self.act3 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm2d(out_filters)\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut = self.act1(shortcut)\n\n resA = self.conv2(shortcut)\n resA = self.act2(resA)\n resA1 = self.bn1(resA)\n\n resA = self.conv3(resA1)\n resA = self.act3(resA)\n resA2 = self.bn2(resA)\n\n output = shortcut + resA2\n return output\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3), stride=1,\n pooling=True, drop_out=True):\n super(ResBlock, self).__init__()\n self.pooling = pooling\n self.drop_out = drop_out\n self.conv1 = nn.Conv2d(in_filters, out_filters, kernel_size=(1, 1), stride=stride)\n self.act1 = nn.LeakyReLU()\n\n self.conv2 = nn.Conv2d(in_filters, out_filters, kernel_size=(3, 3), padding=1)\n self.act2 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm2d(out_filters)\n\n self.conv3 = nn.Conv2d(out_filters, out_filters, kernel_size=(3, 3), dilation=2, padding=2)\n self.act3 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm2d(out_filters)\n\n self.conv4 = nn.Conv2d(out_filters, out_filters, kernel_size=(2, 2), dilation=2, padding=1)\n self.act4 = nn.LeakyReLU()\n self.bn3 = nn.BatchNorm2d(out_filters)\n\n self.conv5 = nn.Conv2d(out_filters * 3, out_filters, kernel_size=(1, 1))\n self.act5 = nn.LeakyReLU()\n self.bn4 = nn.BatchNorm2d(out_filters)\n\n if pooling:\n self.dropout = nn.Dropout2d(p=dropout_rate)\n self.pool = nn.AvgPool2d(kernel_size=kernel_size, stride=2, padding=1)\n else:\n self.dropout = nn.Dropout2d(p=dropout_rate)\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut = self.act1(shortcut)\n\n resA = self.conv2(x)\n resA = self.act2(resA)\n resA1 = self.bn1(resA)\n\n resA = self.conv3(resA1)\n resA = self.act3(resA)\n resA2 = self.bn2(resA)\n\n resA = self.conv4(resA2)\n resA = self.act4(resA)\n resA3 = self.bn3(resA)\n\n concat = torch.cat((resA1, resA2, resA3), dim=1)\n resA = self.conv5(concat)\n resA = self.act5(resA)\n resA = self.bn4(resA)\n resA = shortcut + resA\n\n if self.pooling:\n if self.drop_out:\n resB = self.dropout(resA)\n else:\n resB = resA\n resB = self.pool(resB)\n\n return resB, resA\n else:\n if self.drop_out:\n resB = self.dropout(resA)\n else:\n resB = resA\n return resB\n\n\nclass UpBlock(nn.Module):\n def __init__(self, in_filters, out_filters, skip_filters, dropout_rate, drop_out=True):\n super(UpBlock, self).__init__()\n self.drop_out = drop_out\n self.in_filters = in_filters\n self.out_filters = out_filters\n\n self.dropout1 = nn.Dropout2d(p=dropout_rate)\n\n self.dropout2 = nn.Dropout2d(p=dropout_rate)\n\n self.conv1 = nn.Conv2d(in_filters // 4 + skip_filters, out_filters, (3, 3), padding=1)\n self.act1 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm2d(out_filters)\n\n self.conv2 = nn.Conv2d(out_filters, out_filters, (3, 3), dilation=2, padding=2)\n self.act2 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm2d(out_filters)\n\n self.conv3 = nn.Conv2d(out_filters, out_filters, (2, 2), dilation=2, padding=1)\n self.act3 = nn.LeakyReLU()\n 
self.bn3 = nn.BatchNorm2d(out_filters)\n\n self.conv4 = nn.Conv2d(out_filters * 3, out_filters, kernel_size=(1, 1))\n self.act4 = nn.LeakyReLU()\n self.bn4 = nn.BatchNorm2d(out_filters)\n\n self.dropout3 = nn.Dropout2d(p=dropout_rate)\n\n def forward(self, x, skip):\n upA = nn.PixelShuffle(2)(x)\n if self.drop_out:\n upA = self.dropout1(upA)\n\n upB = torch.cat((upA, skip), dim=1)\n if self.drop_out:\n upB = self.dropout2(upB)\n\n upE = self.conv1(upB)\n upE = self.act1(upE)\n upE1 = self.bn1(upE)\n\n upE = self.conv2(upE1)\n upE = self.act2(upE)\n upE2 = self.bn2(upE)\n\n upE = self.conv3(upE2)\n upE = self.act3(upE)\n upE3 = self.bn3(upE)\n\n concat = torch.cat((upE1, upE2, upE3), dim=1)\n upE = self.conv4(concat)\n upE = self.act4(upE)\n upE = self.bn4(upE)\n if self.drop_out:\n upE = self.dropout3(upE)\n\n return upE\n\n\nclass SalsaNextBackbone(nn.Module):\n def __init__(self, model_cfg, input_channels):\n super().__init__()\n\n self.model_cfg = model_cfg\n\n self.cs = model_cfg.get('CS', [32, 64, 128, 256, 256])\n\n self.downCntx = ResContextBlock(input_channels, self.cs[0])\n self.downCntx2 = ResContextBlock(self.cs[0], self.cs[0])\n self.downCntx3 = ResContextBlock(self.cs[0], self.cs[0])\n\n self.resBlock1 = ResBlock(self.cs[0], self.cs[1], 0.2, pooling=True, drop_out=False)\n self.resBlock2 = ResBlock(self.cs[1], self.cs[2], 0.2, pooling=True)\n self.resBlock3 = ResBlock(self.cs[2], self.cs[3], 0.2, pooling=True)\n self.resBlock4 = ResBlock(self.cs[3], self.cs[4], 0.2, pooling=True)\n self.resBlock5 = ResBlock(self.cs[4], self.cs[4], 0.2, pooling=False)\n\n self.upBlock1 = UpBlock(self.cs[4], self.cs[2], self.cs[4], 0.2)\n self.upBlock2 = UpBlock(self.cs[2], self.cs[2], self.cs[3], 0.2)\n self.upBlock3 = UpBlock(self.cs[2], self.cs[1], self.cs[2], 0.2)\n self.upBlock4 = UpBlock(self.cs[1], self.cs[0], self.cs[1], 0.2, drop_out=False)\n\n self.num_2d_features = self.cs[0]\n\n def forward(self, batch_dict):\n x = batch_dict['spatial_features']\n\n downCntx = self.downCntx(x)\n downCntx = self.downCntx2(downCntx)\n downCntx = self.downCntx3(downCntx)\n\n down0c, down0b = self.resBlock1(downCntx)\n down1c, down1b = self.resBlock2(down0c)\n down2c, down2b = self.resBlock3(down1c)\n down3c, down3b = self.resBlock4(down2c)\n down5c = self.resBlock5(down3c)\n\n up4e = self.upBlock1(down5c, down3b)\n up3e = self.upBlock2(up4e, down2b)\n up2e = self.upBlock3(up3e, down1b)\n up1e = self.upBlock4(up2e, down0b)\n\n batch_dict['spatial_features_2d'] = up1e\n return batch_dict\n" ]
[ [ "torch.nn.Dropout2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.PixelShuffle", "torch.nn.AvgPool2d", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d" ] ]
evolution232634/vehicle_detection
[ "1f58aab8150df29a973ba1fd70ed226665df1450" ]
[ "research/object_detection/dataset_tools/create_pet_tf_record.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Convert the Oxford pet dataset to TFRecord for object_detection.\n\nSee: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar\n Cats and Dogs\n IEEE Conference on Computer Vision and Pattern Recognition, 2012\n http://www.robots.ox.ac.uk/~vgg/data/pets/\n\nExample usage:\n python object_detection/dataset_tools/create_pet_tf_record.py \\\n --data_dir=/home/user/pet \\\n --output_dir=/home/user/pet/output\n\"\"\"\n\nimport hashlib\nimport io\nimport logging\nimport os\nimport random\nimport re\n\nfrom lxml import etree\nimport numpy as np\nimport PIL.Image\nimport tensorflow as tf\n\nfrom object_detection.utils import dataset_util\nfrom object_detection.utils import label_map_util\n\nflags = tf.app.flags\nflags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.')\nflags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.')\nflags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt',\n 'Path to label map proto')\nFLAGS = flags.FLAGS\n\n\ndef get_class_name_from_filename(file_name):\n \"\"\"Gets the class name from a file.\n\n Args:\n file_name: The file name to get the class name from.\n ie. 
\"american_pit_bull_terrier_105.jpg\"\n\n Returns:\n A string of the class name.\n \"\"\"\n match = re.match(r'([A-Za-z_]+)(_[0-9]+\\.jpg)', file_name, re.I)\n return match.groups()[0]\n\n\ndef dict_to_tf_example(data,\n mask_path,\n label_map_dict,\n image_subdirectory,\n ignore_difficult_instances=False):\n \"\"\"Convert XML derived dict to tf.Example proto.\n\n Notice that this function normalizes the bounding box coordinates provided\n by the raw data.\n\n Args:\n data: dict holding PASCAL XML fields for a single image (obtained by\n running dataset_util.recursive_parse_xml_to_dict)\n mask_path: String path to PNG encoded mask.\n label_map_dict: A map from string label names to integers ids.\n image_subdirectory: String specifying subdirectory within the\n Pascal dataset directory holding the actual image data.\n ignore_difficult_instances: Whether to skip difficult instances in the\n dataset (default: False).\n Returns:\n example: The converted tf.Example.\n\n Raises:\n ValueError: if the image pointed to by data['filename'] is not a valid JPEG\n \"\"\"\n img_path = os.path.join(image_subdirectory, data['filename'])\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width = int(data['size']['width'])\n height = int(data['size']['height'])\n\n xmins = []\n ymins = []\n xmaxs = []\n ymaxs = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n masks = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n if ignore_difficult_instances and difficult:\n continue\n difficult_obj.append(int(difficult))\n xmin = float(obj['bndbox']['xmin'])\n xmax = float(obj['bndbox']['xmax'])\n ymin = float(obj['bndbox']['ymin'])\n ymax = float(obj['bndbox']['ymax'])\n\n xmins.append(xmin / width)\n ymins.append(ymin / height)\n xmaxs.append(xmax / width)\n ymaxs.append(ymax / height)\n\n class_name = obj['name']\n classes_text.append(class_name.encode('utf8'))\n classes.append(label_map_dict[class_name])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example\n\n\ndef 
create_tf_record(output_filename,\n label_map_dict,\n annotations_dir,\n image_dir,\n examples):\n \"\"\"Creates a TFRecord file from examples.\n\n Args:\n output_filename: Path to where output file is saved.\n label_map_dict: The label map dictionary.\n annotations_dir: Directory where annotation files are stored.\n image_dir: Directory where image files are stored.\n examples: Examples to parse and save to tf record).\n \"\"\"\n writer = tf.python_io.TFRecordWriter(output_filename)\n for idx, example in enumerate(examples):\n if idx % 100 == 0:\n logging.info('On image %d of %d', idx, len(examples))\n xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')\n mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')\n\n if not os.path.exists(xml_path):\n logging.warning('Could not find %s, ignoring example.', xml_path)\n continue\n with tf.gfile.GFile(xml_path, 'r') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']\n\n try:\n tf_example = dict_to_tf_example(\n data, mask_path, label_map_dict, image_dir)\n writer.write(tf_example.SerializeToString())\n except ValueError:\n logging.warning('Invalid example: %s, ignoring.', xml_path)\n\n writer.close()\n\n\n# TODO(derekjchow): Add test for pet/PASCAL main files.\ndef main(_):\n data_dir = FLAGS.data_dir\n label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)\n\n logging.info('Reading from Pet dataset.')\n image_dir = os.path.join(data_dir, 'images')\n annotations_dir = os.path.join(data_dir, 'annotations')\n examples_path = os.path.join(annotations_dir, 'trainval.txt')\n examples_list = dataset_util.read_examples_list(examples_path)\n\n # Test images are not included in the downloaded data set, so we shall perform\n # our own split.\n random.seed(42)\n random.shuffle(examples_list)\n num_examples = len(examples_list)\n num_train = int(0.7 * num_examples)\n train_examples = examples_list[:num_train]\n val_examples = examples_list[num_train:]\n logging.info('%d training and %d validation examples.',\n len(train_examples), len(val_examples))\n\n train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')\n val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')\n \n create_tf_record(train_output_path, label_map_dict, annotations_dir,\n image_dir, train_examples)\n create_tf_record(val_output_path, label_map_dict, annotations_dir,\n image_dir, val_examples)\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.app.run", "tensorflow.gfile.GFile", "tensorflow.train.Features", "tensorflow.python_io.TFRecordWriter" ] ]
C-bowman/mesa
[ "9ee9166e1f5598b43dc257c9d0b1d6903368e834" ]
[ "settings.py" ]
[ "\n\n# ----------------------------------------------------------------------------\n# directory settings\n# ----------------------------------------------------------------------------\n\n# directory where a reference SOLPS run is stored\nsolps_ref_directory = '/pfs/work/g2hjame/solps-iter/runs/TCV_58196_small/ref_clean/'\n\n# file name in which the training data will be stored\ntraining_data_file = 'training_data.h5'\n\n\n# ----------------------------------------------------------------------------\n# diagnostics settings\n# ----------------------------------------------------------------------------\nfrom numpy import load\nfrom sims.instruments import ThomsonScattering\n\ninstrument_data = load('# instrument data path #')\nTS = ThomsonScattering(\n R=instrument_data['R'],\n z=instrument_data['z'],\n weights=instrument_data['weights'],\n measurements=load('# measurement data path #')\n)\n\ndiagnostics = [TS]\n\n\n# ----------------------------------------------------------------------------\n# SOLPS settings\n# ----------------------------------------------------------------------------\n\n# Number of cores to execute SOLPS on\nsolps_n_proc = 6\n\n# Sets whether the divertor transport coefficients are overridden by those\n# used for the PFR\nset_divertor_transport = True\n\n# The range over which the transport coefficient profiles are defined\ntransport_profile_bounds = (-0.1, 0.1)\n\n# the number of SOLPS runs which will be launched in parallel during initial sampling\nconcurrent_runs = 10\n\n# Number of hours after which a SOLPS run will be automatically cancelled\nsolps_timeout_hours = 24\n\n\n# ----------------------------------------------------------------------------\n# gaussian-process regression settings\n# ----------------------------------------------------------------------------\n\n# Boolean flag to set whether cross-validation should be used in place\n# of the marginal likelihood to select the GP hyper-parameters\ncross_validation = False\n\n# Import one of the covariance functions from gp_tools as the\n# covariance_kernel variable so it can be passed to the GP.\nfrom inference.gp import SquaredExponential\ncovariance_kernel = SquaredExponential\n\n# Set the lower and upper bounds for the log-scale hyper-parameters\n# of the covariance kernel.\nlog_scale_bounds = (-6.0, 4.0)\n\n# Choose whether the errors on the experimental data are treated either as\n# Gaussian, Cauchy or Laplace:\nerror_model = 'cauchy'\n\n\n\n\n\n# ----------------------------------------------------------------------------\n# optimiser settings\n# ----------------------------------------------------------------------------\n\n# Number of random-search evaluations which will be used to create an\n# initial set of training data for the GP-optimisation\ninitial_sample_count = 30\n\n# Maximum number of iterations after which the optimisation terminates\nmax_iterations = 200\n\n# specifies which metric is used to select new proposed evaluations\nfrom inference.gp import UpperConfidenceBound\nacquisition_function = UpperConfidenceBound(kappa=1.)\n\n# Select whether or not a trust-region approach is used.\ntrust_region = True\n\n# The width of the trust-region.\ntrust_region_width = 0.08\n\n# Lower & upper bounds placed on the values of the\n# profile model parameters\noptimisation_bounds = {\n # Chi-profile parameter boundaries\n 'chi_boundary_left' : (0., 2.), # left boundary height from barrier level\n 'chi_boundary_right' : (0., 2.), # right boundary height from barrier level\n 'chi_frac_left' : (0., 
1.), # left-middle height as a fraction of barrier-boundary gap\n 'chi_frac_right' : (0., 1.), # right-middle height as a fraction of barrier-boundary gap\n 'chi_barrier_centre' : (-0.01, 0.01), # transport barrier centre\n 'chi_barrier_height' : (0.05, 0.4), # transport barrier height\n 'chi_barrier_width' : (0.002, 0.04), # transport barrier width\n 'chi_gap_left' : (1e-3, 0.05), # radius gap between left-midpoint and transport barrier\n 'chi_gap_right' : (1e-3, 0.05), # radius gap between right-midpoint and transport barrier\n\n # D-profile parameter boundaries\n 'D_boundary_left' : (0., 2.), # left boundary height from barrier level\n 'D_boundary_right' : (0., 2.), # right boundary height from barrier level\n 'D_frac_left' : (0., 1.), # left-middle height as a fraction of barrier-boundary gap\n 'D_frac_right' : (0., 1.), # right-middle height as a fraction of barrier-boundary gap\n 'D_barrier_centre' : (-0.01, 0.01), # transport barrier centre\n 'D_barrier_height' : (0.05, 0.4), # transport barrier height\n 'D_barrier_width' : (0.002, 0.04), # transport barrier width\n 'D_gap_left' : (1e-3, 0.05), # radius gap between left-midpoint and transport barrier\n 'D_gap_right' : (1e-3, 0.05), # radius gap between right-midpoint and transport barrier\n}\n\n# The 'fixed_parameter_values' dictionary allows a sub-set of the parameters to be\n# fixed at particular values, thereby removing them from the optimisation problem.\n\n# If the value in the dictionary is set to 'None', that parameter will be optimised as normal.\n# If any value other than 'None' is given, the parameter will be fixed and the given value\n# will be used in running SOLPS.\n\nfixed_parameter_values = {\n # Chi-profile parameters\n 'chi_boundary_left' : None, # left boundary height from barrier level\n 'chi_boundary_right' : None, # right boundary height from barrier level\n 'chi_frac_left' : None, # left-middle height as a fraction of barrier-boundary gap\n 'chi_frac_right' : None, # right-middle height as a fraction of barrier-boundary gap\n 'chi_barrier_centre' : None, # transport barrier centre\n 'chi_barrier_height' : None, # transport barrier height\n 'chi_barrier_width' : None, # transport barrier width\n 'chi_gap_left' : None, # radius gap between left-midpoint and transport barrier\n 'chi_gap_right' : None, # radius gap between right-midpoint and transport barrier\n\n # D-profile parameters\n 'D_boundary_left' : None, # left boundary height from barrier level\n 'D_boundary_right' : None, # right boundary height from barrier level\n 'D_frac_left' : None, # left-middle height as a fraction of barrier-boundary gap\n 'D_frac_right' : None, # right-middle height as a fraction of barrier-boundary gap\n 'D_barrier_centre' : None, # transport barrier centre\n 'D_barrier_height' : None, # transport barrier height\n 'D_barrier_width' : None, # transport barrier width\n 'D_gap_left' : None, # radius gap between left-midpoint and transport barrier\n 'D_gap_right' : None, # radius gap between right-midpoint and transport barrier\n}\n" ]
[ [ "numpy.load" ] ]
stevelill/segreg
[ "71e213d8f36b7bf28d1975bd17b59f61d9dba174" ]
[ "segreg/data/_testing_util.py" ]
[ "\"\"\"\nRoutines to generate fake data to use for testing purposes.\n\"\"\"\n\n# Author: Steven Lillywhite\n# License: BSD 3 clause\n\n\nimport numpy as np\n\n\ndef generate_fake_data(num_data, x_min, x_max, seed=None):\n \"\"\"\n Generates an 1D array of fake data.\n\n Parameters\n ----------\n num_data: int\n x_min: float\n lower bound for the independent data\n x_max: float\n upper bound for the independent data\n seed: int\n sets the seed for the random number generator\n\n Returns\n -------\n indep: numpy array ndim 1\n data is returned sorted\n \"\"\"\n if seed is not None:\n np.random.seed(seed)\n\n x_range = x_max - x_min\n indep = np.random.random(num_data) * x_range + x_min\n indep = np.sort(indep)\n return indep\n\n\ndef _generate_fake_data_no_noise(num_data, x_min, x_max, func, seed=None):\n \"\"\"\n Generates fake one-dimensional OLS data.\n\n The independent data is generated randomly, and the dependent data is a\n deterministic function of the independent data.\n\n Parameters\n ----------\n num_data: int\n x_min: float\n lower bound for the independent data\n x_max: float\n upper bound for the independent data\n func: a function taking an array and returning an array\n generates the dep data\n seed: int\n sets the seed for the random number generator\n\n Returns\n -------\n indep: numpy array ndim 1\n data is returned sorted\n dep: numpy array ndim 1\n same length as indep\n \"\"\"\n indep = generate_fake_data(num_data, x_min, x_max, seed=seed)\n\n dep = func(indep)\n\n return indep, dep\n\n\ndef generate_fake_data_normal_errors(num_data,\n x_min,\n x_max,\n func,\n std_dev,\n seed=None):\n \"\"\"\n Generates fake one-dimensional OLS data.\n\n The independent data is generated randomly, and the dependent data is a\n function of the independent data plus random noise.\n\n The noise is Gaussian with mean zero and standard deviation as specified by\n the input.\n\n Parameters\n ----------\n num_data: int\n x_min: float\n lower bound for the independent data\n x_max: float\n upper bound for the independent data\n func: a function taking an array and returning an array\n generates the dep data\n std_dev: float\n the standard deviation of the noise\n seed: int\n sets the seed for the random number generator\n\n Returns\n -------\n indep: numpy array ndim 1\n data is returned sorted\n dep: numpy array ndim 1\n same length as indep\n \"\"\"\n\n if seed is not None:\n np.random.seed(seed)\n indep, dep = _generate_fake_data_no_noise(num_data,\n x_min,\n x_max,\n func,\n seed=seed)\n resid = std_dev * np.random.randn(num_data)\n dep += resid\n\n return indep, dep\n" ]
[ [ "numpy.random.randn", "numpy.random.random", "numpy.random.seed", "numpy.sort" ] ]
iVibudh/stock-prediction
[ "234bd7b1e67e6e2d9c728dce1851e020aab0662e", "234bd7b1e67e6e2d9c728dce1851e020aab0662e" ]
[ "email-updates-stonks-AI-ML/venv/Lib/site-packages/pandas_datareader/tests/av/test_av_time_series.py", "email-updates-stonks-AI-ML/venv/Lib/site-packages/pandas_datareader/tests/test_utils.py" ]
[ "from datetime import datetime\nimport os\n\nimport pandas as pd\nimport pytest\n\nfrom pandas_datareader import data as web\nfrom pandas_datareader._utils import RemoteDataError\n\nTEST_API_KEY = os.getenv(\"ALPHAVANTAGE_API_KEY\")\nTEST_API_KEY = None if not TEST_API_KEY else TEST_API_KEY\n\npytestmark = [\n pytest.mark.requires_api_key,\n pytest.mark.alpha_vantage,\n pytest.mark.skipif(TEST_API_KEY is None, reason=\"ALPHAVANTAGE_API_KEY not set\"),\n]\n\n\nclass TestAVTimeSeries(object):\n @classmethod\n def setup_class(cls):\n pytest.importorskip(\"lxml\")\n cls.col_index_adj = pd.Index(\n [\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adjusted close\",\n \"volume\",\n \"dividend amount\",\n ]\n )\n cls.col_index = pd.Index([\"open\", \"high\", \"low\", \"close\", \"volume\"])\n\n @property\n def start(self):\n return datetime(2015, 2, 9)\n\n @property\n def end(self):\n return datetime(2017, 5, 24)\n\n def test_av_bad_symbol(self):\n with pytest.raises((ValueError, RemoteDataError)):\n web.DataReader(\n \"BADTICKER\",\n \"av-daily\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n\n def test_av_daily(self):\n df = web.DataReader(\n \"AAPL\",\n \"av-daily\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n assert df.columns.equals(self.col_index)\n assert len(df) == 578\n assert df[\"volume\"][-1] == 19178000\n\n expected1 = df.loc[\"2017-02-09\"]\n assert expected1[\"close\"] == 132.42\n assert expected1[\"high\"] == 132.445\n\n expected2 = df.loc[\"2017-05-24\"]\n assert expected2[\"close\"] == 153.34\n assert expected2[\"high\"] == 154.17\n\n def test_av_daily_adjusted(self):\n df = web.DataReader(\n \"AAPL\",\n \"av-daily-adjusted\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n assert df.columns.equals(\n pd.Index(\n [\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adjusted close\",\n \"volume\",\n \"dividend amount\",\n \"split coefficient\",\n ]\n )\n )\n assert len(df) == 578\n assert df[\"volume\"][-1] == 19178000\n\n expected1 = df.loc[\"2017-02-09\"]\n assert expected1[\"close\"] == 132.42\n assert expected1[\"high\"] == 132.445\n assert expected1[\"dividend amount\"] == 0.57\n assert expected1[\"split coefficient\"] == 1.0\n\n expected2 = df.loc[\"2017-05-24\"]\n assert expected2[\"close\"] == 153.34\n assert expected2[\"high\"] == 154.17\n assert expected2[\"dividend amount\"] == 0.00\n assert expected2[\"split coefficient\"] == 1.0\n\n @staticmethod\n def _helper_df_weekly_monthly(df, adj=False):\n\n expected1 = df.loc[\"2015-02-27\"]\n assert expected1[\"close\"] == 128.46\n assert expected1[\"high\"] == 133.60\n\n expected2 = df.loc[\"2017-03-31\"]\n assert expected2[\"close\"] == 143.66\n assert expected2[\"high\"] == 144.5\n\n def test_av_weekly(self):\n df = web.DataReader(\n \"AAPL\",\n \"av-weekly\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n\n assert len(df) == 119\n assert df.iloc[0].name == \"2015-02-13\"\n assert df.iloc[-1].name == \"2017-05-19\"\n assert df.columns.equals(self.col_index)\n self._helper_df_weekly_monthly(df, adj=False)\n\n def test_av_weekly_adjusted(self):\n df = web.DataReader(\n \"AAPL\",\n \"av-weekly-adjusted\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n\n assert len(df) == 119\n assert df.iloc[0].name == \"2015-02-13\"\n assert df.iloc[-1].name == \"2017-05-19\"\n assert df.columns.equals(self.col_index_adj)\n self._helper_df_weekly_monthly(df, adj=True)\n\n def test_av_monthly(self):\n df 
= web.DataReader(\n \"AAPL\",\n \"av-monthly\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n\n assert len(df) == 27\n assert df.iloc[0].name == \"2015-02-27\"\n assert df.iloc[-1].name == \"2017-04-28\"\n assert df.columns.equals(self.col_index)\n self._helper_df_weekly_monthly(df, adj=False)\n\n def test_av_monthly_adjusted(self):\n df = web.DataReader(\n \"AAPL\",\n \"av-monthly-adjusted\",\n start=self.start,\n end=self.end,\n retry_count=6,\n pause=20.5,\n )\n\n assert df.columns.equals(self.col_index_adj)\n assert len(df) == 27\n assert df.iloc[0].name == \"2015-02-27\"\n assert df.iloc[-1].name == \"2017-04-28\"\n self._helper_df_weekly_monthly(df, adj=True)\n\n def test_av_intraday(self):\n # Not much available to test, but ensure close in length\n df = web.DataReader(\"AAPL\", \"av-intraday\", retry_count=6, pause=20.5)\n\n assert len(df) > 1000\n assert \"open\" in df.columns\n assert \"close\" in df.columns\n", "import datetime as dt\nimport pandas as pd\nimport pytest\n\nfrom pandas_datareader._utils import _sanitize_dates\n\n\nclass TestUtils(object):\n @pytest.mark.parametrize(\n \"input_date\",\n [\n \"2019-01-01\",\n \"JAN-01-2010\",\n dt.datetime(2019, 1, 1),\n dt.date(2019, 1, 1),\n pd.Timestamp(2019, 1, 1),\n ],\n )\n def test_sanitize_dates(self, input_date):\n expected_start = pd.to_datetime(input_date)\n expected_end = pd.to_datetime(dt.date.today())\n result = _sanitize_dates(input_date, None)\n assert result == (expected_start, expected_end)\n\n def test_sanitize_dates_int(self):\n start_int = 2018\n end_int = 2019\n expected_start = pd.to_datetime(dt.datetime(start_int, 1, 1))\n expected_end = pd.to_datetime(dt.datetime(end_int, 1, 1))\n assert _sanitize_dates(start_int, end_int) == (expected_start, expected_end)\n\n def test_sanitize_invalid_dates(self):\n with pytest.raises(ValueError):\n _sanitize_dates(2019, 2018)\n\n with pytest.raises(ValueError):\n _sanitize_dates(\"2019-01-01\", \"2018-01-01\")\n\n with pytest.raises(ValueError):\n _sanitize_dates(\"20199\", None)\n\n def test_sanitize_dates_defaults(self):\n default_start = pd.to_datetime(dt.date.today() - dt.timedelta(days=365 * 5))\n default_end = pd.to_datetime(dt.date.today())\n assert _sanitize_dates(None, None) == (default_start, default_end)\n" ]
[ [ "pandas.Index" ], [ "pandas.to_datetime", "pandas.Timestamp" ] ]
arcaneknowledge/ArcanOth
[ "c5d55ef6277550b5e73e65bc0b3d126996773da6" ]
[ "tensorflow/load.py" ]
[ "import tensorflow.keras as keras \nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\"\"\"\nmodel = keras.models.load_model(\"mobilenet/saved_model/saved_model.pb\")\nmodel.load_weights(\"mobilenet/checkpoint\")\nprint(\" Model : \"+str(model.summary()))\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\nGRAPH_PB_PATH = 'mobilenet/saved_model/saved_model.pb' #path to your .pb file\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nwith tf.compat.v1.Session() as sess:\n print(\"load graph\")\n with gfile.FastGFile(GRAPH_PB_PATH,'rb') as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n graph_nodes=[n for n in graph_def.node]\n names = []\n for t in graph_nodes:\n names.append(t.name)\n print(names)" ]
[ [ "tensorflow.compat.v1.Session", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.import_graph_def", "tensorflow.compat.v1.GraphDef" ] ]
LCAV/continuous-localization
[ "00d6ed4921291fb7a6c3ca1f9bf00724d28640e5" ]
[ "source/simulation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nsimulation.py: Generate random trajectories and noisy distance estimates, reconstruct trajectory and save errors. \n\"\"\"\n\nimport argparse\nimport json\nimport logging\nimport os\nimport sys\nimport time\n\nimport cvxpy\nimport numpy as np\n\nfrom global_variables import DIM\nfrom measurements import get_measurements, create_mask, add_noise, create_anchors\nfrom solvers import semidef_relaxation_noiseless, trajectory_recovery\nfrom trajectory import Trajectory\nimport probability as p\n\n\ndef robust_increment(arr, idx):\n \"\"\" increment value of array if inside bound, and set to 1 if previously nan. \"\"\"\n if idx < arr.shape:\n if np.isnan(arr[idx]):\n arr[idx] = 1\n else:\n arr[idx] += 1\n\n\ndef robust_add(arr, idx, value):\n if idx < arr.shape:\n if np.isnan(arr[idx]):\n arr[idx] = 0\n arr[idx] += value\n\n\ndef run_simulation(parameters, outfolder=None, solver=None, verbose=False):\n \"\"\" Run simulation. \n\n :param parameters: Can be either the name of the folder where parameters.json \n is stored, or a new dict of parameters.\n\n \"\"\"\n if type(parameters) == str:\n fname = parameters + 'parameters.json'\n parameters = read_params(fname)\n print('read parameters from file {}.'.format(fname))\n\n elif type(parameters) == dict:\n parameters = parameters\n\n # if we are trying new parameters and saving in a directory that already exists,\n # we need to make sure that the saved parameters are actually the same.\n if outfolder is not None:\n try:\n parameters_old = read_params(outfolder + 'parameters.json')\n parameters['time'] = parameters_old['time']\n assert parameters == parameters_old, 'found conflicting parameters file: {}'.format(outfolder +\n 'parameters.json')\n except FileNotFoundError:\n print('no conflicting parameters file found.')\n except AssertionError as error:\n raise (error)\n else:\n raise TypeError('parameters needs to be folder name or dictionary.')\n\n if 'noise_to_square' not in parameters:\n parameters['noise_to_square'] = False\n\n if 'measure_distances' not in parameters:\n parameters['measure_distances'] = False\n\n if 'sampling_strategy' not in parameters:\n parameters['sampling_strategy'] = 'uniform'\n\n complexities = parameters['complexities']\n anchors = parameters['anchors']\n positions = parameters['positions']\n n_its = parameters['n_its']\n noise_sigmas = parameters['noise_sigmas']\n success_thresholds = parameters['success_thresholds']\n assert len(success_thresholds) == len(noise_sigmas)\n\n if parameters['sampling_strategy'] == 'single_time':\n max_measurements = max(positions)\n else:\n max_measurements = max(positions) * max(anchors)\n\n successes = np.full((len(complexities), len(anchors), len(positions), len(noise_sigmas), max_measurements), np.nan)\n errors = np.full(successes.shape, np.nan)\n relative_errors = np.full(successes.shape, np.nan)\n absolute_errors = np.full(successes.shape, np.nan)\n num_not_solved = np.full(successes.shape, np.nan)\n num_not_accurate = np.full(successes.shape, np.nan)\n squared_distances = []\n\n for c_idx, n_complexity in enumerate(complexities):\n print('n_complexity', n_complexity)\n\n for a_idx, n_anchors in enumerate(anchors):\n print('n_anchors', n_anchors)\n\n for p_idx, n_positions in enumerate(positions):\n print('n_positions', n_positions)\n\n if parameters['sampling_strategy'] == 'single_time':\n n_measurements = n_positions\n else:\n n_measurements = n_positions * n_anchors\n for m_idx, n_missing in enumerate(range(n_measurements)):\n if verbose:\n 
print('measurements idx', m_idx)\n\n for noise_idx, noise_sigma in enumerate(noise_sigmas):\n indexes = np.s_[c_idx, a_idx, p_idx, noise_idx, m_idx]\n if verbose:\n print(\"noise\", noise_sigma)\n\n # set all values to 0 since we have visited them.\n if np.isnan(successes[indexes]):\n successes[indexes] = 0.0\n if np.isnan(num_not_solved[indexes]):\n num_not_solved[indexes] = 0.0\n if np.isnan(num_not_accurate[indexes]):\n num_not_accurate[indexes] = 0.0\n\n for _ in range(n_its):\n\n trajectory = Trajectory(n_complexity, dim=DIM)\n anchors_coord = create_anchors(DIM, n_anchors)\n trajectory.set_coeffs(seed=None)\n\n basis, D_topright = get_measurements(trajectory, anchors_coord, n_samples=n_positions)\n distances = np.sqrt(D_topright)\n D_topright = add_noise(D_topright, noise_sigma, parameters[\"noise_to_square\"])\n mask = create_mask(n_positions,\n n_anchors,\n strategy=parameters['sampling_strategy'],\n n_missing=n_missing)\n if parameters['measure_distances']:\n squared_distances.extend(D_topright.flatten().tolist())\n D_topright = np.multiply(D_topright, mask)\n\n try:\n assert p.full_rank_condition(\n np.sort(np.sum(mask, axis=0))[::-1], DIM + 1, n_complexity), \"insufficient rank\"\n if (solver is None) or (solver == \"semidef_relaxation_noiseless\"):\n X = semidef_relaxation_noiseless(D_topright,\n anchors_coord,\n basis,\n chosen_solver=cvxpy.CVXOPT)\n P_hat = X[:DIM, DIM:]\n elif solver == 'trajectory_recovery':\n P_hat = trajectory_recovery(D_topright, anchors_coord, basis)\n elif solver == 'weighted_trajectory_recovery':\n P_hat = trajectory_recovery(D_topright, anchors_coord, basis, weighted=True)\n else:\n raise ValueError(solver)\n\n # calculate reconstruction error with respect to distances\n trajectory_estimated = Trajectory(coeffs=P_hat)\n _, D_estimated = get_measurements(trajectory_estimated,\n anchors_coord,\n n_samples=n_positions)\n estimated_distances = np.sqrt(D_estimated)\n\n robust_add(errors, indexes, np.linalg.norm(P_hat - trajectory.coeffs))\n robust_add(relative_errors, indexes,\n np.linalg.norm((distances - estimated_distances) / (distances + 1e-10)))\n robust_add(absolute_errors, indexes, np.linalg.norm(distances - estimated_distances))\n\n assert not np.linalg.norm(P_hat - trajectory.coeffs) > success_thresholds[noise_idx]\n\n robust_increment(successes, indexes)\n\n except cvxpy.SolverError:\n logging.info(\"could not solve n_positions={}, n_missing={}\".format(\n n_positions, n_missing))\n robust_increment(num_not_solved, indexes)\n\n except ZeroDivisionError:\n logging.info(\"could not solve n_positions={}, n_missing={}\".format(\n n_positions, n_missing))\n robust_increment(num_not_solved, indexes)\n\n except np.linalg.LinAlgError:\n robust_increment(num_not_solved, indexes)\n\n except AssertionError as e:\n if str(e) == \"insufficient rank\":\n robust_increment(num_not_solved, indexes)\n else:\n logging.info(\"result not accurate n_positions={}, n_missing={}\".format(\n n_positions, n_missing))\n robust_increment(num_not_accurate, indexes)\n\n errors[indexes] = errors[indexes] / (n_its - num_not_solved[indexes])\n relative_errors[indexes] = relative_errors[indexes] / (n_its - num_not_solved[indexes])\n\n results = {\n 'successes': successes,\n 'num-not-solved': num_not_solved,\n 'num-not-accurate': num_not_accurate,\n 'errors': errors,\n 'relative-errors': relative_errors,\n 'absolute-errors': absolute_errors,\n 'distances': squared_distances\n }\n\n if outfolder is not None:\n print('Done with simulation. 
Saving results...')\n\n parameters['time'] = time.time()\n\n if not os.path.exists(outfolder):\n os.makedirs(outfolder)\n\n save_params(outfolder + 'parameters.json', **parameters)\n save_results(outfolder + 'result_{}_{}', results)\n else:\n return results\n\n\ndef save_results(filename, results):\n \"\"\" Save results in with increasing number in filename. \"\"\"\n for key, array in results.items():\n for i in range(100):\n try_name = filename.format(key, i)\n if not os.path.exists(try_name + '.npy'):\n try_name = filename.format(key, i)\n np.save(try_name, array, allow_pickle=False)\n print('saved as', try_name)\n break\n else:\n print('exists:', try_name)\n\n\ndef read_results(filestart):\n \"\"\" Read all results saved with above save_results function. \"\"\"\n results = {}\n dirname = os.path.dirname(filestart)\n for filename in os.listdir(dirname):\n full_path = os.path.join(dirname, filename)\n if os.path.isfile(full_path) and filestart in full_path:\n print('reading', full_path)\n key = filename.split('_')[-2]\n new_array = np.load(full_path, allow_pickle=False)\n if key in results.keys():\n old_array = results[key]\n results[key] = np.stack([old_array, new_array[..., np.newaxis]], axis=-1)\n else:\n print('new key:', key)\n results[key] = new_array[..., np.newaxis]\n return results\n\n\ndef save_params(filename, **kwargs):\n for key in kwargs.keys():\n try:\n kwargs[key] = kwargs[key].tolist()\n except AttributeError as e:\n pass\n with open(filename, 'w') as fp:\n json.dump(kwargs, fp, indent=4)\n print('saved as', filename)\n\n\ndef read_params(filename):\n with open(filename, 'r') as fp:\n param_dict = json.load(fp)\n return param_dict\n\n\ndef arg_parser(description=''):\n \"\"\" Parse command line for resultfile argument and do checks. \"\"\"\n\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('-o', '--outfile', metavar='outfile', type=str, help='location of results file.', default='')\n parser.add_argument('-p', '--plotting', dest='plotting', action='store_true')\n parser.add_argument('-n', '--n_it', metavar='n_it', type=int, help='number of iterations', default=20)\n parser.set_defaults(plotting=False)\n args = parser.parse_args()\n\n outfile = args.outfile\n\n continue_ = ''\n if os.path.isfile(outfile):\n while not continue_ in ['y', 'n']:\n continue_ = input(f'{outfile} exists. Do you want to overwrite? (y/n)')\n if continue_ == 'n':\n sys.exit()\n\n if outfile != '':\n print(f'Saving results as {outfile}.')\n else:\n print(f'Not saving results.')\n return outfile, args.plotting, args.n_it\n" ]
[ [ "numpy.sqrt", "numpy.multiply", "numpy.isnan", "numpy.linalg.norm", "numpy.stack", "numpy.save", "numpy.full", "numpy.load", "numpy.sum" ] ]
ethanr-2000/deep-fry-bot
[ "1a1598915ae216f3fa1cab0cf5620028cea22a64" ]
[ "src/deeppyer/__init__.py" ]
[ "from collections import namedtuple\nfrom io import BytesIO\nimport pkgutil\nfrom typing import Tuple\nfrom PIL import Image, ImageOps, ImageEnhance\nimport cv2\nimport numpy\n\n__all__ = ('Colour', 'ColourTuple', 'DefaultColours', 'deepfry')\n\nColour = Tuple[int, int, int]\nColourTuple = Tuple[Colour, Colour]\n\n\nclass DefaultColours:\n \"\"\"Default colours provided for deepfrying\"\"\"\n red = ((254, 0, 2), (255, 255, 15))\n blue = ((36, 113, 229), (255,) * 3)\n\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\nflare_img = Image.open(BytesIO(pkgutil.get_data(__package__, 'flare.png')))\n\nFlarePosition = namedtuple('FlarePosition', ['x', 'y', 'size'])\n\n\ndef deepfry(img: Image, *, colours: ColourTuple = DefaultColours.red, flares: bool = True) -> Image:\n \"\"\"\n Deepfry a given image.\n\n Parameters\n ----------\n img : `Image`\n Image to manipulate.\n colours : `ColourTuple`, optional\n A tuple of the colours to apply on the image.\n flares : `bool`, optional\n Whether or not to try and detect faces for applying lens flares.\n\n Returns\n -------\n `Image`\n Deepfried image.\n \"\"\"\n img = img.copy().convert('RGB')\n flare_positions = []\n\n if flares:\n opencv_img = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2GRAY)\n\n # Crush image to hell and back\n img = img.convert('RGB')\n width, height = img.width, img.height\n img = img.resize((int(width ** .75), int(height ** .75)), resample=Image.LANCZOS)\n img = img.resize((int(width ** .88), int(height ** .88)), resample=Image.BILINEAR)\n img = img.resize((int(width ** .9), int(height ** .9)), resample=Image.BICUBIC)\n img = img.resize((width, height), resample=Image.BICUBIC)\n img = ImageOps.posterize(img, 4)\n\n # Generate colour overlay\n r = img.split()[0]\n r = ImageEnhance.Contrast(r).enhance(2.0)\n r = ImageEnhance.Brightness(r).enhance(1.5)\n\n r = ImageOps.colorize(r, colours[0], colours[1])\n\n # Overlay red and yellow onto main image and sharpen the hell out of it\n img = Image.blend(img, r, 0.75)\n img = ImageEnhance.Sharpness(img).enhance(100.0)\n\n # Apply flares on any detected eyes\n for flare in flare_positions:\n flare_transformed = flare_img.copy().resize((flare.size,) * 2, resample=Image.BILINEAR)\n img.paste(flare_transformed, (flare.x, flare.y), flare_transformed)\n\n return img\n" ]
[ [ "numpy.array" ] ]
bethgelab/decompose
[ "b6ad3e3d1a2d049f1853cdc309ad042293415ad1" ]
[ "decompose/distributions/exponentialAlgorithms.py" ]
[ "from typing import Dict\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow import Tensor\n\nfrom decompose.distributions.algorithms import Algorithms\n\n\nclass ExponentialAlgorithms(Algorithms):\n\n @classmethod\n def sample(cls, parameters: Dict[str, Tensor], nSamples: Tensor) -> Tensor:\n beta = parameters[\"beta\"]\n exp = tf.distributions.Exponential(rate=1./beta)\n r = exp.sample(sample_shape=(nSamples,))\n return(r)\n\n @classmethod\n def mode(cls, parameters: Dict[str, Tensor]) -> Tensor:\n mode = tf.zeros_like(parameters[\"beta\"])\n return(mode)\n\n @classmethod\n def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:\n beta = parameters[\"beta\"]\n # TODO: once tensorflow issue #20737 is resolved use the\n # commented out code to calculate the pdf\n # exp = tf.distributions.Exponential(rate=1./beta)\n # pdf = exp.prob(value=data)\n pdf = 1./beta*tf.exp(-data/beta)\n pdf = tf.where(tf.less(data, 0.), tf.zeros_like(pdf), pdf)\n return(pdf)\n\n @classmethod\n def fit(cls, parameters: Dict[str, Tensor],\n data: tf.Tensor) -> Dict[str, Tensor]:\n beta = tf.reduce_mean(tf.abs(data), axis=0)\n updatedParameters = {\"beta\": beta}\n return(updatedParameters)\n\n @classmethod\n def llh(cls, parameters: Dict[str, Tensor], data: tf.Tensor) -> Tensor:\n beta = parameters[\"beta\"]\n # TODO: once tensorflow issue #20737 is resolved use the\n # commented out code to calculate the llh\n # exp = tf.distributions.Exponential(rate=1./beta)\n # llh = exp.log_prob(value=data)\n llh = -tf.log(beta) - data/beta\n llh = tf.where(data < 0., -tf.ones_like(llh)*np.inf, llh)\n return(llh)\n\n @classmethod\n def fitLatents(cls, parameters: Dict[str, Tensor],\n data: Tensor) -> Dict[str, Tensor]:\n return({})\n" ]
[ [ "tensorflow.less", "tensorflow.ones_like", "tensorflow.exp", "tensorflow.zeros_like", "tensorflow.distributions.Exponential", "tensorflow.log", "tensorflow.abs" ] ]
madhavjk/warehouse_safety_kit_detection
[ "744c88e6c72b3825b30fadd6a881a3fb0c4820b1" ]
[ "com_ineuron_apparel/predictor_yolo_detector/utils/google_utils.py" ]
[ "# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries\n# pip install --upgrade google-cloud-storage\n# from google.cloud import storage\n\nimport os\nimport platform\nimport subprocess\nimport time\nfrom pathlib import Path\n\nimport torch\n\n\ndef gsutil_getsize(url=''):\n # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du\n s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8')\n return eval(s.split(' ')[0]) if len(s) else 0 # bytes\n\n\ndef attempt_download(weights):\n # Attempt to download pretrained weights if not found locally\n weights = weights.strip().replace(\"'\", '')\n file = Path(weights).name\n\n msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/'\n models = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] # available models\n\n if file in models and not os.path.isfile(weights):\n # Google Drive\n # d = {'yolov5s.pt': '1R5T6rIyy3lLwgFXNms8whc-387H0tMQO',\n # 'yolov5m.pt': '1vobuEExpWQVpXExsJ2w-Mbf3HJjWkQJr',\n # 'yolov5l.pt': '1hrlqD1Wdei7UT4OgT785BEk1JwnSvNEV',\n # 'yolov5x.pt': '1mM8aZJlWTxOg7BZJvNUMrTnA2AbeCVzS'}\n # r = gdrive_download(id=d[file], name=weights) if file in d else 1\n # if r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6: # check\n # return\n\n try: # GitHub\n url = 'https://github.com/ultralytics/yolov5/releases/download/v3.1/' + file\n print('Downloading %s to %s...' % (url, weights))\n torch.hub.download_url_to_file(url, weights)\n assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check\n except Exception as e: # GCP\n print('Download error: %s' % e)\n url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file\n print('Downloading %s to %s...' % (url, weights))\n r = os.system('curl -L %s -o %s' % (url, weights)) # torch.hub.download_url_to_file(url, weights)\n finally:\n if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6): # check\n os.remove(weights) if os.path.exists(weights) else None # remove partial downloads\n print('ERROR: Download failure: %s' % msg)\n print('')\n return\n\n\ndef gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'):\n # Downloads a file from Google Drive. from utils.google_utils import *; gdrive_download()\n t = time.time()\n\n print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')\n os.remove(name) if os.path.exists(name) else None # remove existing\n os.remove('cookie') if os.path.exists('cookie') else None\n\n # Attempt file download\n out = \"NUL\" if platform.system() == \"Windows\" else \"/dev/null\"\n os.system('curl -c ./cookie -s -L \"drive.google.com/uc?export=download&id=%s\" > %s ' % (id, out))\n if os.path.exists('cookie'): # large file\n s = 'curl -Lb ./cookie \"drive.google.com/uc?export=download&confirm=%s&id=%s\" -o %s' % (get_token(), id, name)\n else: # small file\n s = 'curl -s -L -o %s \"drive.google.com/uc?export=download&id=%s\"' % (name, id)\n r = os.system(s) # execute, capture return\n os.remove('cookie') if os.path.exists('cookie') else None\n\n # Error check\n if r != 0:\n os.remove(name) if os.path.exists(name) else None # remove partial\n print('Download error ') # raise Exception('Download error')\n return r\n\n # Unzip if archive\n if name.endswith('.zip'):\n print('unzipping... 
', end='')\n os.system('unzip -q %s' % name) # unzip\n os.remove(name) # remove zip to free space\n\n print('Done (%.1fs)' % (time.time() - t))\n return r\n\n\ndef get_token(cookie=\"./cookie\"):\n with open(cookie) as f:\n for line in f:\n if \"download\" in line:\n return line.split()[-1]\n return \"\"\n\n# def upload_blob(bucket_name, source_file_name, destination_blob_name):\n# # Uploads a file to a bucket\n# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python\n#\n# storage_client = storage.Client()\n# bucket = storage_client.get_bucket(bucket_name)\n# blob = bucket.blob(destination_blob_name)\n#\n# blob.upload_from_filename(source_file_name)\n#\n# print('File {} uploaded to {}.'.format(\n# source_file_name,\n# destination_blob_name))\n#\n#\n# def download_blob(bucket_name, source_blob_name, destination_file_name):\n# # Uploads a blob from a bucket\n# storage_client = storage.Client()\n# bucket = storage_client.get_bucket(bucket_name)\n# blob = bucket.blob(source_blob_name)\n#\n# blob.download_to_filename(destination_file_name)\n#\n# print('Blob {} downloaded to {}.'.format(\n# source_blob_name,\n# destination_file_name))\n" ]
[ [ "torch.hub.download_url_to_file" ] ]
ctberthiaume/opedia_dataset_validator
[ "0d88aa0a37a203a894affed60961ab3f17ecdc93" ]
[ "src/opedia_dataset_validator/validator.py" ]
[ "from __future__ import unicode_literals\nfrom .error import error\nfrom io import open\nimport arrow\nimport os\nimport oyaml as yaml\nimport pandas as pd\nimport re\nimport sys\n\n# Load dataset file specifications\nspec_file_name = 'dataset_file_def.yaml'\nspec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name)\nwith open(spec_file_path, encoding='utf-8') as fh:\n spec = yaml.load(fh)\n\n\ndef validate(input_path):\n if (sys.version_info > (3, 0)):\n wb = pd.read_excel(input_path, sheet_name=None, na_values=[],\n keep_default_na=False, dtype=str)\n else:\n wb = pd.read_excel(input_path, sheet_name=None, na_values=[],\n keep_default_na=False, dtype=unicode)\n\n errors = []\n errors.extend(validate_filename(input_path, spec))\n errors.extend(validate_sheet_metadata(wb, spec))\n errors.extend(validate_sheet_vars(wb, spec))\n errors.extend(validate_sheet_data(wb, spec))\n return errors\n\n\ndef validate_column_datetimes(series, colspec, sheet):\n errors = []\n\n empty_errors, series = validate_column_generic(series, colspec, sheet)\n errors.extend(empty_errors)\n\n # Now look for format errors in non-empty rows\n present = series[series.str.len() > 0]\n for idx, val in present.iteritems():\n try:\n dt = arrow.get(val, colspec['format'])\n except ValueError as e:\n errors.append(error({\n 'message': 'error in datetime string: %s' % e,\n 'value': val,\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n except arrow.parser.ParserError as e:\n errors.append(error({\n 'message': 'invalid datetime string - should match %s' % colspec['format'],\n 'value': val,\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n\n return errors\n\n\ndef validate_column_floats(series, colspec, sheet):\n errors = []\n\n empty_errors, series = validate_column_generic(series, colspec, sheet)\n errors.extend(empty_errors)\n\n # Convert to floats\n converted = pd.to_numeric(series, errors='coerce')\n\n # Non-numeric strings are now NaN\n # Flag NaN as errors\n nonnumeric_errors = series[pd.isna(converted)]\n for idx, val in nonnumeric_errors.iteritems():\n errors.append(error({\n 'message': 'invalid value',\n 'value': val,\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n # Check range\n min_errors = None\n max_errors = None\n if colspec.get('min', None) is not None:\n min_errors = series[converted < colspec['min']]\n for idx, val in min_errors.iteritems():\n errors.append(error({\n 'message': 'value less than minimum of {}'.format(colspec['min']),\n 'value': val,\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n if colspec.get('max', None) is not None:\n max_errors = series[converted > colspec['max']]\n for idx, val in max_errors.iteritems():\n errors.append(error({\n 'message': 'value greater than maximum of {}'.format(colspec['max']),\n 'value': val,\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n\n return errors\n\n\ndef validate_column_generic(series, colspec, sheet):\n errors = []\n\n required = colspec.get('required', None)\n na = colspec.get('na', None)\n\n if not required:\n # Empty cell is a valid value. Remove empty cells before further checks\n series = series[series.str.len() > 0]\n elif str(na) == '':\n # Empty cell is a valid value. Remove empty cells before further checks\n series = series[series.str.len() > 0]\n else:\n # NA is None or is not the empty string, therefore empty cells are not\n # valid values. 
Flag as errors.\n empty_errors = series[series.str.len() == 0]\n for idx, val in empty_errors.iteritems():\n errors.append(error({\n 'message': 'missing required field',\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n # Now remove empty cells\n series = series[series.str.len() > 0]\n if na is not None:\n # Remove NA values before further checks\n series = series[series != na]\n\n return (errors, series)\n\n\ndef validate_column_strings(series, colspec, sheet):\n errors = []\n\n empty_errors, series = validate_column_generic(series, colspec, sheet)\n errors.extend(empty_errors)\n\n if colspec.get('max', None) is not None:\n maxlen_errors = series[series.str.len() > colspec['max']]\n for idx, val in maxlen_errors.iteritems():\n errors.append(error({\n 'message': 'string length > %d' % colspec['max'],\n 'value': val,\n 'row': idx,\n 'column': series.name,\n 'sheet': sheet\n }))\n\n return errors\n\n\ndef validate_filename(input_path, spec):\n fn = os.path.basename(input_path)\n errors = []\n filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\.xlsx$')\n m = filename_re.match(fn)\n if not m:\n errors.append(error({\n 'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx',\n 'value': fn\n }))\n else:\n try:\n dt = arrow.get(m.group('date'), spec['file_date'])\n except ValueError as e:\n errors.append(error({\n 'message': 'error in filename datetime string: %s' % e,\n 'value': m.group('date')\n }))\n except arrow.parser.ParserError as e:\n errors.append(error({\n 'message': 'date in filename must be in %s format' % spec['file_date'],\n 'value': m.group('date')\n }))\n if not re.match(r'^v.+$', m.group('version')):\n errors.append(error({\n 'message': 'version string in filename must start with \"v\"',\n 'value': fn\n }))\n return errors\n\n\ndef validate_sheet_data(wb, spec):\n errors = []\n\n if not 'data' in wb:\n errors.append(error({\n 'message': '\"%s\" worksheet is missing' % 'data',\n 'sheet': 'data'\n }))\n return errors\n\n df = wb['data']\n errors.extend(validate_sheet_generic(df, 'data', spec))\n\n # Next check columns in 'data' that were defined in 'vars_meta_data'\n # First make sure that 'vars_meta_data' doesn't have any errors, if it does\n # don't bother with any more checks here\n if len(validate_sheet_vars(wb, spec)) > 0:\n return errors\n\n # Now check custom data columns\n required_columns = list(spec['columns']['data'].keys())\n df_data = df.drop(required_columns, axis='columns')\n # Collect variable short names from vars_meta_data sheet and check that\n # data columns in 'data' sheet match data columns defined in 'vars' sheet.\n vars_defined = wb['vars_meta_data']['var_short_name'].tolist()\n vars_found = df_data.columns.tolist()\n extra_defined = set(vars_defined).difference(set(vars_found))\n extra_found = set(vars_found).difference(set(vars_defined))\n if extra_defined:\n errors.append(error({\n 'message': 'some data variables were defined in the \"%s\" worksheet but were not found in the \"%s\" worksheet' % ('vars_meta_data', 'data'),\n 'value': ', '.join(extra_defined)\n }))\n if extra_found:\n errors.append(error({\n 'message': 'some data variables were found in the \"%s\" worksheet but were not defined in the \"%s\" worksheet' % ('data', 'vars_meta_data'),\n 'value': ', '.join(extra_found)\n }))\n\n # Now validate the actual data only on the condition of\n # proper missing values.\n # TODO: Is there any type-checking expected in custom vars?\n vars_missing_value = 
wb['vars_meta_data']['var_missing_value'].tolist()\n for var, na in zip(vars_defined, vars_missing_value):\n if var not in extra_defined:\n sheet = 'vars_meta_data'\n colspec = { 'required': True, 'na': na }\n empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data')\n errors.extend(empty_errors)\n\n return errors\n\n\ndef validate_sheet_generic(df, sheet, spec):\n errors = []\n\n required_columns = list(spec['columns'][sheet].keys())\n if df.columns.tolist()[:len(required_columns)] != required_columns:\n errors.append(error({\n 'message': 'the first %d columns of the \"%s\" worksheet should be %s' % (len(required_columns), sheet, required_columns),\n 'value': str(df.columns.tolist()),\n 'sheet': sheet\n }))\n return errors\n\n # Validate cells\n for colname, colspec in spec['columns'][sheet].items():\n v = validator_lookup[colspec['type']]\n errors.extend(v(df[colname], colspec, sheet))\n\n return errors\n\n\ndef validate_sheet_metadata(wb, spec):\n errors = []\n\n if not 'dataset_meta_data' in wb:\n errors.append(error({\n 'message': '\"%s\" worksheet is missing' % 'dataset_meta_data',\n 'sheet': 'dataset_meta_data'\n }))\n return errors\n\n df = wb['dataset_meta_data']\n errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec))\n\n return errors\n\n\ndef validate_sheet_vars(wb, spec=spec):\n errors = []\n\n if not 'vars_meta_data' in wb:\n errors.append(error({\n 'message': '\"%s\" worksheet is missing' % 'vars_meta_data',\n 'sheet': 'vars_meta_data'\n }))\n return errors\n\n df = wb['vars_meta_data']\n errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec))\n\n return errors\n\n\n# Register column validators in lookup\nvalidator_lookup = {\n 'float': validate_column_floats,\n 'string': validate_column_strings,\n 'datetime': validate_column_datetimes,\n 'generic': validate_column_generic\n}\n" ]
[ [ "pandas.isna", "pandas.read_excel", "pandas.to_numeric" ] ]
kajikentaro/ssd_keras
[ "86ac105a4e2e8be28605ca772e85842cc85748d3" ]
[ "main.py" ]
[ "from keras.applications.imagenet_utils import preprocess_input\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.preprocessing import image\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom imageio import imread\nimport tensorflow as tf\nfrom ssd_v2 import SSD300v2\nfrom ssd_utils import BBoxUtility\nnp.set_printoptions(suppress=True)\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.45\nset_session(tf.Session(config=config))\nvoc_classes = ['Aeroplane', 'Bicycle', 'Bird', 'Boat', 'Bottle',\n 'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',\n 'Dog', 'Horse','Motorbike', 'Person', 'Pottedplant',\n 'Sheep', 'Sofa', 'Train', 'Tvmonitor']\nNUM_CLASSES = len(voc_classes) + 1\ninput_shape = (300, 300, 3)\nmodel = SSD300v2(input_shape, num_classes=NUM_CLASSES)\nmodel.load_weights('weights_SSD300.hdf5', by_name=True)\nbbox_util = BBoxUtility(NUM_CLASSES)\n\ndef pltToCV2(fig):\n\timport io\n\tbuf = io.BytesIO() # インメモリのバイナリストリームを作成\n\tfig.savefig(buf, format=\"png\", dpi=180) # matplotlibから出力される画像のバイナリデータをメモリに格納する.\n\tbuf.seek(0) # ストリーム位置を先頭に戻る\n\timg_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8) # メモリからバイナリデータを読み込み, numpy array 形式に変換\n\tbuf.close() # ストリームを閉じる(flushする)\n\timg = cv2.imdecode(img_arr, 1) # 画像のバイナリデータを復元する\n\timg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # cv2.imread() はBGR形式で読み込むのでRGBにする.\n\treturn img\n\ndef predict(inputs, images):\n\tpreds = model.predict(inputs, batch_size=1, verbose=1)\n\tresults = bbox_util.detection_out(preds)\n\tfor i, img in enumerate(images):\n\t\t# Parse the outputs.\n\t\tdet_label = results[i][:, 0]\n\t\tdet_conf = results[i][:, 1]\n\t\tdet_xmin = results[i][:, 2]\n\t\tdet_ymin = results[i][:, 3]\n\t\tdet_xmax = results[i][:, 4]\n\t\tdet_ymax = results[i][:, 5]\n\n\t\t# Get detections with confidence higher than 0.6.\n\t\ttop_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]\n\n\t\ttop_conf = det_conf[top_indices]\n\t\ttop_label_indices = det_label[top_indices].tolist()\n\t\ttop_xmin = det_xmin[top_indices]\n\t\ttop_ymin = det_ymin[top_indices]\n\t\ttop_xmax = det_xmax[top_indices]\n\t\ttop_ymax = det_ymax[top_indices]\n\n\t\tcolors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\n\t\tplt.imshow(img / 255.)\n\t\tcurrentAxis = plt.gca()\n\n\t\tfor i in range(top_conf.shape[0]):\n\t\t\txmin = int(round(top_xmin[i] * img.shape[1]))\n\t\t\tymin = int(round(top_ymin[i] * img.shape[0]))\n\t\t\txmax = int(round(top_xmax[i] * img.shape[1]))\n\t\t\tymax = int(round(top_ymax[i] * img.shape[0]))\n\t\t\tscore = top_conf[i]\n\t\t\tlabel = int(top_label_indices[i])\n\t\t\tlabel_name = voc_classes[label - 1]\n\t\t\tdisplay_txt = '{:0.2f}, {}'.format(score, label_name)\n\t\t\tcoords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1\n\t\t\tcolor = colors[label]\n\t\t\tcurrentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n\t\t\tcurrentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})\n\t\treturn plt\n\nimport cv2\nimport time\nfrom keras.preprocessing import image\nimport numpy as np\ncap = cv2.VideoCapture(0)\nwhile True:\n\t#time.sleep(1)\n\tinputs = []\n\timages = []\n\n\tret, frame = cap.read()\n\timages.append(frame)\n\tframe = image.img_to_array(frame)\n\n\tresized = cv2.resize(frame, (300, 300))\n\tinputs.append(resized)\n\n\tinputs = preprocess_input(np.array(inputs))\n\tplt = predict(inputs, images)\n\timg = pltToCV2(plt)\n\tplt.close()\n\n\tcv2.imshow('tmp',img)\n\n\tcv2.waitKey(1)\n\n\n\ncv2.destroyAllWindows()\ncap.release()" ]
[ [ "numpy.linspace", "numpy.set_printoptions", "tensorflow.ConfigProto", "tensorflow.Session", "numpy.array" ] ]
ziyeshanwai/SSDR
[ "71ef96eabafbfe88d19955d47218a1557a396b31" ]
[ "SSDR.py" ]
[ "# SSDR Implementation in Python\n# Dalton Omens\n\nimport maya.api.OpenMaya as om\nimport pymel.core as pm\nimport numpy as np\nfrom scipy.optimize import lsq_linear\nfrom scipy.cluster.vq import vq, kmeans, whiten\nimport time\n\n\ndef kabsch(P, Q):\n \"\"\"\n Computes the optimal translation and rotation matrices that minimize the \n RMS deviation between two sets of points P and Q using Kabsch's algorithm.\n More here: https://en.wikipedia.org/wiki/Kabsch_algorithm\n Inspiration: https://github.com/charnley/rmsd\n \n inputs: P N x 3 numpy matrix representing the coordinates of the points in P\n Q N x 3 numpy matrix representing the coordinates of the points in Q\n \n return: A 4 x 3 matrix where the first 3 rows are the rotation and the last is translation\n \"\"\"\n if (P.size == 0 or Q.size == 0):\n raise ValueError(\"Empty matrices sent to kabsch\")\n centroid_P = np.mean(P, axis=0)\n centroid_Q = np.mean(Q, axis=0)\n P_centered = P - centroid_P # Center both matrices on centroid\n Q_centered = Q - centroid_Q\n H = P_centered.T.dot(Q_centered) # covariance matrix\n U, S, V = np.linalg.svd(H) # SVD\n R = U.dot(V).T # calculate optimal rotation\n if np.linalg.det(R) < 0: # correct rotation matrix for \n V[2,:] *= -1 # right-hand coordinate system\n R = U.dot(V).T \n t = centroid_Q - R.dot(centroid_P) # translation vector\n return np.vstack((R, t))\n\n\ndef initialize(poses, rest_pose, num_bones, iterations=5):\n \"\"\"\n Uses the k-means algorithm to initialize bone transformations.\n\n inputs: poses |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose\n rest_pose |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose\n num_bones Number of bones to initialize\n iterations Number of iterations to run the k-means algorithm\n\n return: A |num_bones| x |num_poses| x 4 x 3 matrix representing the stacked Rotation and Translation\n for each pose, for each bone.\n A |num_bones| x 3 matrix representing the translations of the rest bones.\n \"\"\"\n num_verts = rest_pose.shape[0]\n num_poses = poses.shape[0]\n bone_transforms = np.empty((num_bones, num_poses, 4, 3)) # [(R, T) for for each pose] for each bone\n # 3rd dim has 3 rows for R and 1 row for T \n rest_bones_t = np.empty((num_bones, 3)) # Translations for bones at rest pose\n rest_pose_corrected = np.empty((num_bones, num_verts, 3)) # Rest pose - mean of vertices attached to each bone\n\n # Use k-means to assign bones to vertices\n whitened = whiten(rest_pose)\n codebook, _ = kmeans(whitened, num_bones)\n vert_assignments, _ = vq(whitened, codebook) # Bone assignment for each vertex (|num_verts| x 1)\n \n # Compute initial random bone transformations\n for bone in range(num_bones):\n rest_bones_t[bone] = np.mean(rest_pose[vert_assignments == bone], axis=0)\n rest_pose_corrected[bone] = rest_pose - np.mean(rest_pose[vert_assignments == bone], axis=0)\n for pose in range(num_poses):\n bone_transforms[bone, pose] = kabsch(rest_pose_corrected[bone, vert_assignments == bone], poses[pose, vert_assignments == bone])\n \n for it in range(iterations):\n # Re-assign bones to vertices using smallest reconstruction error from all poses\n constructed = np.empty((num_bones, num_poses, num_verts, 3)) # |num_bones| x |num_poses| x |num_verts| x 3\n for bone in range(num_bones):\n Rp = bone_transforms[bone,:,:3,:].dot((rest_pose - rest_bones_t[bone]).T).transpose((0, 2, 1)) # |num_poses| x |num_verts| x 3\n # R * p + T\n constructed[bone] = Rp + bone_transforms[bone, :, np.newaxis, 3, :]\n 
errs = np.linalg.norm(constructed - poses, axis=(1, 3))\n vert_assignments = np.argmin(errs, axis=0) \n \n ## Visualization of vertex assignments for bone 0 over iterations\n ## Make 5 copies of an example pose mesh and call them test0, test1...\n #for i in range(num_verts):\n # if vert_assignments[i] == 0:\n # pm.select('test{0}.vtx[{1}]'.format(it, i), add=True)\n #print(vert_assignments)\n\n # For each bone, for each pose, compute new transform using kabsch\n for bone in range(num_bones):\n rest_bones_t[bone] = np.mean(rest_pose[vert_assignments == bone], axis=0)\n rest_pose_corrected[bone] = rest_pose - np.mean(rest_pose[vert_assignments == bone], axis=0)\n for pose in range(num_poses):\n bone_transforms[bone, pose] = kabsch(rest_pose_corrected[bone, vert_assignments == bone], poses[pose, vert_assignments == bone])\n\n return bone_transforms, rest_bones_t\n\n\ndef update_weight_map(bone_transforms, rest_bones_t, poses, rest_pose, sparseness):\n \"\"\"\n Update the bone-vertex weight map W by fixing bone transformations and using a least squares\n solver subject to non-negativity constraint, affinity constraint, and sparseness constraint.\n\n inputs: bone_transforms |num_bones| x |num_poses| x 4 x 3 matrix representing the stacked \n Rotation and Translation for each pose, for each bone.\n rest_bones_t |num_bones| x 3 matrix representing the translations of the rest bones\n poses |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose\n rest_pose |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose\n sparseness Maximum number of bones allowed to influence a particular vertex\n\n return: A |num_verts| x |num_bones| weight map representing the influence of the jth bone on the ith vertex\n \"\"\"\n num_verts = rest_pose.shape[0]\n num_poses = poses.shape[0]\n num_bones = bone_transforms.shape[0]\n\n W = np.empty((num_verts, num_bones))\n\n for v in range(num_verts):\n # For every vertex, solve a least squares problem\n Rp = np.empty((num_bones, num_poses, 3))\n for bone in range(num_bones):\n Rp[bone] = bone_transforms[bone,:,:3,:].dot(rest_pose[v] - rest_bones_t[bone]) # |num_bones| x |num_poses| x 3\n # R * p + T\n Rp_T = Rp + bone_transforms[:, :, 3, :] # |num_bones| x |num_poses| x 3\n A = Rp_T.transpose((1, 2, 0)).reshape((3 * num_poses, num_bones)) # 3 * |num_poses| x |num_bones|\n b = poses[:, v, :].reshape(3 * num_poses) # 3 * |num_poses| x 1\n\n # Bounds ensure non-negativity constraint and kind of affinity constraint\n w = lsq_linear(A, b, bounds=(0, 1), method='bvls').x # |num_bones| x 1\n w /= np.sum(w) # Ensure that w sums to 1 (affinity constraint)\n\n # Remove |B| - |K| bone weights with the least \"effect\"\n effect = np.linalg.norm((A * w).reshape(num_poses, 3, num_bones), axis=1) # |num_poses| x |num_bones|\n effect = np.sum(effect, axis=0) # |num_bones| x 1\n num_discarded = max(num_bones - sparseness, 0)\n effective = np.argpartition(effect, num_discarded)[num_discarded:] # |sparseness| x 1\n\n # Run least squares again, but only use the most effective bones\n A_reduced = A[:, effective] # 3 * |num_poses| x |sparseness|\n w_reduced = lsq_linear(A_reduced, b, bounds=(0, 1), method='bvls').x # |sparseness| x 1\n w_reduced /= np.sum(w_reduced) # Ensure that w sums to 1 (affinity constraint)\n\n w_sparse = np.zeros(num_bones)\n w_sparse[effective] = w_reduced\n w_sparse /= np.sum(w_sparse) # Ensure that w_sparse sums to 1 (affinity constraint)\n\n W[v] = w_sparse\n\n return W\n\n\ndef update_bone_transforms(W, 
bone_transforms, rest_bones_t, poses, rest_pose):\n \"\"\"\n Updates the bone transformations by fixing the bone-vertex weight map and minimizing an\n objective function individually for each pose and each bone.\n \n inputs: W |num_verts| x |num_bones| matrix: bone-vertex weight map. Rows sum to 1, sparse.\n bone_transforms |num_bones| x |num_poses| x 4 x 3 matrix representing the stacked \n Rotation and Translation for each pose, for each bone.\n rest_bones_t |num_bones| x 3 matrix representing the translations of the rest bones\n poses |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose\n rest_pose |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose\n \n return: |num_bones| x |num_poses| x 4 x 3 matrix representing the stacked \n Rotation and Translation for each pose, for each bone.\n \"\"\"\n num_bones = W.shape[1]\n num_poses = poses.shape[0]\n num_verts = W.shape[0]\n \n for pose in range(num_poses):\n for bone in range(num_bones):\n # Represents the points in rest pose without this rest bone's translation\n p_corrected = rest_pose - rest_bones_t[bone] # |num_verts| x 3\n\n # Calculate q_i for all vertices by equation (6)\n constructed = np.empty((num_bones, num_verts, 3)) # |num_bones| x |num_verts| x 3\n for bone2 in range(num_bones):\n # can't use p_corrected before because we want to correct for every bone2 distinctly\n Rp = bone_transforms[bone2,pose,:3,:].dot((rest_pose - rest_bones_t[bone2]).T).T # |num_verts| x 3\n # R * p + T\n constructed[bone2] = Rp + bone_transforms[bone2, pose, 3, :]\n # w * (R * p + T)\n constructed = constructed.transpose((1, 0, 2)) * W[:, :, np.newaxis] # |num_verts| x |num_bones| x 3\n constructed = np.delete(constructed, bone, axis=1) # |num_verts| x |num_bones-1| x 3\n q = poses[pose] - np.sum(constructed, axis=1) # |num_verts| x 3\n\n # Calculate p_star, q_star, p_bar, and q_bar for all verts by equation (8)\n p_star = np.sum(np.square(W[:, bone, np.newaxis]) * p_corrected, axis=0) # |num_verts| x 3 => 3 x 1\n p_star /= np.sum(np.square(W[:, bone])) # 3 x 1\n \n q_star = np.sum(W[:, bone, np.newaxis] * q, axis=0) # |num_verts| x 3 => 3 x 1\n q_star /= np.sum(np.square(W[:, bone])) # 3 x 1\n p_bar = p_corrected - p_star # |num_verts| x 3\n q_bar = q - W[:, bone, np.newaxis] * q_star # |num_verts| x 3\n \n # Perform SVD by equation (9)\n P = (p_bar * W[:, bone, np.newaxis]).T # 3 x |num_verts|\n Q = q_bar.T # 3 x |num_verts|\n \n U, S, V = np.linalg.svd(np.matmul(P, Q.T))\n\n # Calculate rotation R and translation t by equation (10)\n R = U.dot(V).T # 3 x 3\n t = q_star - R.dot(p_star) # 3 x 1\n \n bone_transforms[bone, pose, :3, :] = R\n bone_transforms[bone, pose, 3, :] = t\n \n return bone_transforms\n\n\ndef SSDR(poses, rest_pose, num_bones, sparseness=4, max_iterations=20):\n \"\"\"\n Computes the Smooth Skinning Decomposition with Rigid bones\n \n inputs: poses |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose\n rest_pose |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose\n num_bones number of bones to create\n sparseness max number of bones influencing a single vertex\n \n return: An i x j matrix of bone-vertex weights, where i = # vertices and j = # bones\n A length-B list of (length-t lists of bone transformations [R_j | T_j] ), one list for each bone\n A list of bone translations for the bones at rest\n \"\"\"\n start_time = time.time()\n\n bone_transforms, rest_bones_t = initialize(poses, rest_pose, 
num_bones)\n for _ in range(max_iterations):\n W = update_weight_map(bone_transforms, rest_bones_t, poses, rest_pose, sparseness)\n bone_transforms = update_bone_transforms(W, bone_transforms, rest_bones_t, poses, rest_pose)\n print(\"Reconstruction error:\", reconstruction_err(poses, rest_pose, bone_transforms, rest_bones_t, W))\n \n end_time = time.time()\n print(\"Done. Calculation took {0} seconds\".format(end_time - start_time))\n print(\"Avg reconstruction error:\", reconstruction_err(poses, rest_pose, bone_transforms, rest_bones_t, W))\n\n return W, bone_transforms, rest_bones_t\n\n\ndef reconstruction_err(poses, rest_pose, bone_transforms, rest_bones_t, W):\n \"\"\"\n Computes the average reconstruction error on some poses given bone transforms and weights.\n\n inputs : poses |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose\n rest_pose |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose\n bone_transforms |num_bones| x |num_poses| x 4 x 3 matrix representing the stacked \n Rotation and Translation for each pose, for each bone.\n rest_bones_t |num_bones| x 3 matrix representing the translations of the rest bones\n W |num_verts| x |num_bones| matrix: bone-vertex weight map. Rows sum to 1, sparse.\n\n return: The average reconstruction error v - sum{bones} (w * (R @ p + T))\n \"\"\"\n num_bones = bone_transforms.shape[0]\n num_verts = W.shape[0]\n num_poses = poses.shape[0]\n # Points in rest pose without rest bone translations\n p_corrected = rest_pose[np.newaxis, :, :] - rest_bones_t[:, np.newaxis, :] # |num_bones| x |num_verts| x 3\n constructions = np.empty((num_bones, num_poses, num_verts, 3)) # |num_bones| x |num_poses| x |num_verts| x 3\n for bone in range(num_bones):\n # When you are a vectorizing GOD\n constructions[bone] = np.einsum('ijk,lk->ilj', bone_transforms[bone, :, :3, :], p_corrected[bone]) # |num_poses| x |num_verts| x 3\n constructions += bone_transforms[:, :, np.newaxis, 3, :] # |num_bones| x |num_poses| x |num_verts| x 3\n constructions *= (W.T)[:, np.newaxis, :, np.newaxis] # |num_bones| x |num_poses| x |num_verts| x 3\n errors = poses - np.sum(constructions, axis=0) # |num_poses| x |num_verts| x 3\n return np.mean(np.linalg.norm(errors, axis=2))\n\n\n# Get numpy vertex arrays from selected objects. Rest pose is most recently selected.\nselectionLs = om.MGlobal.getActiveSelectionList()\nnum_poses = selectionLs.length() - 1\nrest_pose = np.array(om.MFnMesh(selectionLs.getDagPath(num_poses)).getPoints(om.MSpace.kWorld))[:, :3]\nposes = np.array([om.MFnMesh(selectionLs.getDagPath(i)).getPoints(om.MSpace.kWorld) for i in range(num_poses)])[:, :, :3]\n\nW, bone_transforms, rest_bones_t = SSDR(poses, rest_pose, 2)\n" ]
[ [ "scipy.cluster.vq.whiten", "numpy.square", "numpy.linalg.svd", "scipy.cluster.vq.vq", "numpy.einsum", "scipy.cluster.vq.kmeans", "numpy.vstack", "numpy.matmul", "numpy.linalg.norm", "numpy.linalg.det", "scipy.optimize.lsq_linear", "numpy.delete", "numpy.mean", "numpy.argmin", "numpy.argpartition", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
znicholls/xarray
[ "98e96923293aa8d21e2339e5c890df8c2633493f" ]
[ "xarray/tests/test_dataset.py" ]
[ "import pickle\nimport sys\nimport warnings\nfrom copy import copy, deepcopy\nfrom io import StringIO\nfrom textwrap import dedent\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.core.indexes.datetimes import DatetimeIndex\n\nimport xarray as xr\nfrom xarray import (\n DataArray,\n Dataset,\n IndexVariable,\n MergeError,\n Variable,\n align,\n backends,\n broadcast,\n open_dataset,\n set_options,\n)\nfrom xarray.coding.cftimeindex import CFTimeIndex\nfrom xarray.core import dtypes, indexing, utils\nfrom xarray.core.common import duck_array_ops, full_like\nfrom xarray.core.npcompat import IS_NEP18_ACTIVE\nfrom xarray.core.pycompat import integer_types\nfrom xarray.core.utils import is_scalar\n\nfrom . import (\n InaccessibleArray,\n UnexpectedDataAccess,\n assert_allclose,\n assert_array_equal,\n assert_equal,\n assert_identical,\n has_cftime,\n has_dask,\n raises_regex,\n requires_bottleneck,\n requires_cftime,\n requires_dask,\n requires_numbagg,\n requires_scipy,\n requires_sparse,\n source_ndarray,\n)\n\ntry:\n import dask.array as da\nexcept ImportError:\n pass\n\n\ndef create_test_data(seed=None):\n rs = np.random.RandomState(seed)\n _vars = {\n \"var1\": [\"dim1\", \"dim2\"],\n \"var2\": [\"dim1\", \"dim2\"],\n \"var3\": [\"dim3\", \"dim1\"],\n }\n _dims = {\"dim1\": 8, \"dim2\": 9, \"dim3\": 10}\n\n obj = Dataset()\n obj[\"time\"] = (\"time\", pd.date_range(\"2000-01-01\", periods=20))\n obj[\"dim2\"] = (\"dim2\", 0.5 * np.arange(_dims[\"dim2\"]))\n obj[\"dim3\"] = (\"dim3\", list(\"abcdefghij\"))\n for v, dims in sorted(_vars.items()):\n data = rs.normal(size=tuple(_dims[d] for d in dims))\n obj[v] = (dims, data, {\"foo\": \"variable\"})\n obj.coords[\"numbers\"] = (\n \"dim3\",\n np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype=\"int64\"),\n )\n obj.encoding = {\"foo\": \"bar\"}\n assert all(obj.data.flags.writeable for obj in obj.variables.values())\n return obj\n\n\ndef create_append_test_data(seed=None):\n rs = np.random.RandomState(seed)\n\n lat = [2, 1, 0]\n lon = [0, 1, 2]\n nt1 = 3\n nt2 = 2\n time1 = pd.date_range(\"2000-01-01\", periods=nt1)\n time2 = pd.date_range(\"2000-02-01\", periods=nt2)\n string_var = np.array([\"ae\", \"bc\", \"df\"], dtype=object)\n string_var_to_append = np.array([\"asdf\", \"asdfg\"], dtype=object)\n unicode_var = [\"áó\", \"áó\", \"áó\"]\n datetime_var = np.array(\n [\"2019-01-01\", \"2019-01-02\", \"2019-01-03\"], dtype=\"datetime64[s]\"\n )\n datetime_var_to_append = np.array(\n [\"2019-01-04\", \"2019-01-05\"], dtype=\"datetime64[s]\"\n )\n bool_var = np.array([True, False, True], dtype=bool)\n bool_var_to_append = np.array([False, True], dtype=bool)\n\n ds = xr.Dataset(\n data_vars={\n \"da\": xr.DataArray(\n rs.rand(3, 3, nt1),\n coords=[lat, lon, time1],\n dims=[\"lat\", \"lon\", \"time\"],\n ),\n \"string_var\": xr.DataArray(string_var, coords=[time1], dims=[\"time\"]),\n \"unicode_var\": xr.DataArray(\n unicode_var, coords=[time1], dims=[\"time\"]\n ).astype(np.unicode_),\n \"datetime_var\": xr.DataArray(datetime_var, coords=[time1], dims=[\"time\"]),\n \"bool_var\": xr.DataArray(bool_var, coords=[time1], dims=[\"time\"]),\n }\n )\n\n ds_to_append = xr.Dataset(\n data_vars={\n \"da\": xr.DataArray(\n rs.rand(3, 3, nt2),\n coords=[lat, lon, time2],\n dims=[\"lat\", \"lon\", \"time\"],\n ),\n \"string_var\": xr.DataArray(\n string_var_to_append, coords=[time2], dims=[\"time\"]\n ),\n \"unicode_var\": xr.DataArray(\n unicode_var[:nt2], coords=[time2], dims=[\"time\"]\n ).astype(np.unicode_),\n \"datetime_var\": 
xr.DataArray(\n datetime_var_to_append, coords=[time2], dims=[\"time\"]\n ),\n \"bool_var\": xr.DataArray(bool_var_to_append, coords=[time2], dims=[\"time\"]),\n }\n )\n\n ds_with_new_var = xr.Dataset(\n data_vars={\n \"new_var\": xr.DataArray(\n rs.rand(3, 3, nt1 + nt2),\n coords=[lat, lon, time1.append(time2)],\n dims=[\"lat\", \"lon\", \"time\"],\n )\n }\n )\n\n assert all(objp.data.flags.writeable for objp in ds.variables.values())\n assert all(objp.data.flags.writeable for objp in ds_to_append.variables.values())\n return ds, ds_to_append, ds_with_new_var\n\n\ndef create_test_multiindex():\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"level_1\", \"level_2\")\n )\n return Dataset({}, {\"x\": mindex})\n\n\ndef create_test_stacked_array():\n x = DataArray(pd.Index(np.r_[:10], name=\"x\"))\n y = DataArray(pd.Index(np.r_[:20], name=\"y\"))\n a = x * y\n b = x * y * y\n return a, b\n\n\nclass InaccessibleVariableDataStore(backends.InMemoryDataStore):\n def __init__(self):\n super().__init__()\n self._indexvars = set()\n\n def store(self, variables, *args, **kwargs):\n super().store(variables, *args, **kwargs)\n for k, v in variables.items():\n if isinstance(v, IndexVariable):\n self._indexvars.add(k)\n\n def get_variables(self):\n def lazy_inaccessible(k, v):\n if k in self._indexvars:\n return v\n data = indexing.LazilyOuterIndexedArray(InaccessibleArray(v.values))\n return Variable(v.dims, data, v.attrs)\n\n return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()}\n\n\nclass TestDataset:\n def test_repr(self):\n data = create_test_data(seed=123)\n data.attrs[\"foo\"] = \"bar\"\n # need to insert str dtype at runtime to handle different endianness\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (dim1: 8, dim2: 9, dim3: 10, time: 20)\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-20\n * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0\n * dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'\n numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3\n Dimensions without coordinates: dim1\n Data variables:\n var1 (dim1, dim2) float64 -1.086 0.9973 0.283 ... 0.1995 0.4684 -0.8312\n var2 (dim1, dim2) float64 1.162 -1.097 -2.123 ... 0.1302 1.267 0.3328\n var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 ... -0.2452 -0.3616\n Attributes:\n foo: bar\"\"\"\n % data[\"dim3\"].dtype\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n with set_options(display_width=100):\n max_len = max(map(len, repr(data).split(\"\\n\")))\n assert 90 < max_len < 100\n\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n *empty*\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(Dataset()).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n # verify that ... 
doesn't appear for scalar coordinates\n data = Dataset({\"foo\": (\"x\", np.ones(10))}).mean()\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n foo float64 1.0\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n # verify long attributes are truncated\n data = Dataset(attrs={\"foo\": \"bar\" * 1000})\n assert len(repr(data)) < 1000\n\n def test_repr_multiindex(self):\n data = create_test_multiindex()\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) MultiIndex\n - level_1 (x) object 'a' 'a' 'b' 'b'\n - level_2 (x) int64 1 2 1 2\n Data variables:\n *empty*\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n # verify that long level names are not truncated\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"a_quite_long_level_name\", \"level_2\")\n )\n data = Dataset({}, {\"x\": mindex})\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) MultiIndex\n - a_quite_long_level_name (x) object 'a' 'a' 'b' 'b'\n - level_2 (x) int64 1 2 1 2\n Data variables:\n *empty*\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n def test_repr_period_index(self):\n data = create_test_data(seed=456)\n data.coords[\"time\"] = pd.period_range(\"2000-01-01\", periods=20, freq=\"B\")\n\n # check that creating the repr doesn't raise an error #GH645\n repr(data)\n\n def test_unicode_data(self):\n # regression test for GH834\n data = Dataset({\"foø\": [\"ba®\"]}, attrs={\"å\": \"∑\"})\n repr(data) # should not raise\n\n byteorder = \"<\" if sys.byteorder == \"little\" else \">\"\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (foø: 1)\n Coordinates:\n * foø (foø) %cU3 %r\n Data variables:\n *empty*\n Attributes:\n å: ∑\"\"\"\n % (byteorder, \"ba®\")\n )\n actual = str(data)\n assert expected == actual\n\n @pytest.mark.skipif(not IS_NEP18_ACTIVE, reason=\"requires __array_function__\")\n def test_repr_nep18(self):\n class Array:\n def __init__(self):\n self.shape = (2,)\n self.dtype = np.dtype(np.float64)\n\n def __array_function__(self, *args, **kwargs):\n pass\n\n def __repr__(self):\n return \"Custom\\nArray\"\n\n dataset = Dataset({\"foo\": (\"x\", Array())})\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 2)\n Dimensions without coordinates: x\n Data variables:\n foo (x) float64 Custom Array\"\"\"\n )\n assert expected == repr(dataset)\n\n def test_info(self):\n ds = create_test_data(seed=123)\n ds = ds.drop_vars(\"dim3\") # string type prints differently in PY2 vs PY3\n ds.attrs[\"unicode_attr\"] = \"ba®\"\n ds.attrs[\"string_attr\"] = \"bar\"\n\n buf = StringIO()\n ds.info(buf=buf)\n\n expected = dedent(\n \"\"\"\\\n xarray.Dataset {\n dimensions:\n \\tdim1 = 8 ;\n \\tdim2 = 9 ;\n \\tdim3 = 10 ;\n \\ttime = 20 ;\n\n variables:\n \\tdatetime64[ns] time(time) ;\n \\tfloat64 dim2(dim2) ;\n \\tfloat64 var1(dim1, dim2) ;\n \\t\\tvar1:foo = variable ;\n \\tfloat64 var2(dim1, dim2) ;\n \\t\\tvar2:foo = variable ;\n \\tfloat64 var3(dim3, dim1) ;\n \\t\\tvar3:foo = variable ;\n \\tint64 numbers(dim3) ;\n\n // global attributes:\n \\t:unicode_attr = ba® ;\n \\t:string_attr = bar ;\n }\"\"\"\n )\n actual = buf.getvalue()\n assert expected == actual\n buf.close()\n\n def test_constructor(self):\n x1 = (\"x\", 
2 * np.arange(100))\n x2 = (\"x\", np.arange(1000))\n z = ([\"x\", \"y\"], np.arange(1000).reshape(100, 10))\n\n with raises_regex(ValueError, \"conflicting sizes\"):\n Dataset({\"a\": x1, \"b\": x2})\n with raises_regex(ValueError, \"disallows such variables\"):\n Dataset({\"a\": x1, \"x\": z})\n with raises_regex(TypeError, \"tuple of form\"):\n Dataset({\"x\": (1, 2, 3, 4, 5, 6, 7)})\n with raises_regex(ValueError, \"already exists as a scalar\"):\n Dataset({\"x\": 0, \"y\": (\"x\", [1, 2, 3])})\n\n # verify handling of DataArrays\n expected = Dataset({\"x\": x1, \"z\": z})\n actual = Dataset({\"z\": expected[\"z\"]})\n assert_identical(expected, actual)\n\n def test_constructor_invalid_dims(self):\n # regression for GH1120\n with pytest.raises(MergeError):\n Dataset(\n data_vars=dict(v=(\"y\", [1, 2, 3, 4])),\n coords=dict(y=DataArray([0.1, 0.2, 0.3, 0.4], dims=\"x\")),\n )\n\n def test_constructor_1d(self):\n expected = Dataset({\"x\": ([\"x\"], 5.0 + np.arange(5))})\n actual = Dataset({\"x\": 5.0 + np.arange(5)})\n assert_identical(expected, actual)\n\n actual = Dataset({\"x\": [5, 6, 7, 8, 9]})\n assert_identical(expected, actual)\n\n def test_constructor_0d(self):\n expected = Dataset({\"x\": ([], 1)})\n for arg in [1, np.array(1), expected[\"x\"]]:\n actual = Dataset({\"x\": arg})\n assert_identical(expected, actual)\n\n class Arbitrary:\n pass\n\n d = pd.Timestamp(\"2000-01-01T12\")\n args = [\n True,\n None,\n 3.4,\n np.nan,\n \"hello\",\n b\"raw\",\n np.datetime64(\"2000-01-01\"),\n d,\n d.to_pydatetime(),\n Arbitrary(),\n ]\n for arg in args:\n print(arg)\n expected = Dataset({\"x\": ([], arg)})\n actual = Dataset({\"x\": arg})\n assert_identical(expected, actual)\n\n def test_constructor_deprecated(self):\n with raises_regex(ValueError, \"DataArray dimensions\"):\n DataArray([1, 2, 3], coords={\"x\": [0, 1, 2]})\n\n def test_constructor_auto_align(self):\n a = DataArray([1, 2], [(\"x\", [0, 1])])\n b = DataArray([3, 4], [(\"x\", [1, 2])])\n\n # verify align uses outer join\n expected = Dataset(\n {\"a\": (\"x\", [1, 2, np.nan]), \"b\": (\"x\", [np.nan, 3, 4])}, {\"x\": [0, 1, 2]}\n )\n actual = Dataset({\"a\": a, \"b\": b})\n assert_identical(expected, actual)\n\n # regression test for GH346\n assert isinstance(actual.variables[\"x\"], IndexVariable)\n\n # variable with different dimensions\n c = (\"y\", [3, 4])\n expected2 = expected.merge({\"c\": c})\n actual = Dataset({\"a\": a, \"b\": b, \"c\": c})\n assert_identical(expected2, actual)\n\n # variable that is only aligned against the aligned variables\n d = (\"x\", [3, 2, 1])\n expected3 = expected.merge({\"d\": d})\n actual = Dataset({\"a\": a, \"b\": b, \"d\": d})\n assert_identical(expected3, actual)\n\n e = (\"x\", [0, 0])\n with raises_regex(ValueError, \"conflicting sizes\"):\n Dataset({\"a\": a, \"b\": b, \"e\": e})\n\n def test_constructor_pandas_sequence(self):\n\n ds = self.make_example_math_dataset()\n pandas_objs = {\n var_name: ds[var_name].to_pandas() for var_name in [\"foo\", \"bar\"]\n }\n ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)\n del ds_based_on_pandas[\"x\"]\n assert_equal(ds, ds_based_on_pandas)\n\n # reindex pandas obj, check align works\n rearranged_index = reversed(pandas_objs[\"foo\"].index)\n pandas_objs[\"foo\"] = pandas_objs[\"foo\"].reindex(rearranged_index)\n ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)\n del ds_based_on_pandas[\"x\"]\n assert_equal(ds, ds_based_on_pandas)\n\n def test_constructor_pandas_single(self):\n\n das = [\n 
DataArray(np.random.rand(4), dims=[\"a\"]), # series\n DataArray(np.random.rand(4, 3), dims=[\"a\", \"b\"]), # df\n ]\n\n for a in das:\n pandas_obj = a.to_pandas()\n ds_based_on_pandas = Dataset(pandas_obj)\n for dim in ds_based_on_pandas.data_vars:\n assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim])\n\n def test_constructor_compat(self):\n data = {\"x\": DataArray(0, coords={\"y\": 1}), \"y\": (\"z\", [1, 1, 1])}\n expected = Dataset({\"x\": 0}, {\"y\": (\"z\", [1, 1, 1])})\n actual = Dataset(data)\n assert_identical(expected, actual)\n\n data = {\"y\": (\"z\", [1, 1, 1]), \"x\": DataArray(0, coords={\"y\": 1})}\n actual = Dataset(data)\n assert_identical(expected, actual)\n\n original = Dataset(\n {\"a\": ((\"x\", \"y\"), np.ones((2, 3)))},\n {\"c\": ((\"x\", \"y\"), np.zeros((2, 3))), \"x\": [0, 1]},\n )\n expected = Dataset(\n {\"a\": (\"x\", np.ones(2)), \"b\": (\"y\", np.ones(3))},\n {\"c\": ((\"x\", \"y\"), np.zeros((2, 3))), \"x\": [0, 1]},\n )\n\n actual = Dataset(\n {\"a\": original[\"a\"][:, 0], \"b\": original[\"a\"][0].drop_vars(\"x\")}\n )\n assert_identical(expected, actual)\n\n data = {\"x\": DataArray(0, coords={\"y\": 3}), \"y\": (\"z\", [1, 1, 1])}\n with pytest.raises(MergeError):\n Dataset(data)\n\n data = {\"x\": DataArray(0, coords={\"y\": 1}), \"y\": [1, 1]}\n actual = Dataset(data)\n expected = Dataset({\"x\": 0}, {\"y\": [1, 1]})\n assert_identical(expected, actual)\n\n def test_constructor_with_coords(self):\n with raises_regex(ValueError, \"found in both data_vars and\"):\n Dataset({\"a\": (\"x\", [1])}, {\"a\": (\"x\", [1])})\n\n ds = Dataset({}, {\"a\": (\"x\", [1])})\n assert not ds.data_vars\n assert list(ds.coords.keys()) == [\"a\"]\n\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"level_1\", \"level_2\")\n )\n with raises_regex(ValueError, \"conflicting MultiIndex\"):\n Dataset({}, {\"x\": mindex, \"y\": mindex})\n Dataset({}, {\"x\": mindex, \"level_1\": range(4)})\n\n def test_properties(self):\n ds = create_test_data()\n assert ds.dims == {\"dim1\": 8, \"dim2\": 9, \"dim3\": 10, \"time\": 20}\n assert list(ds.dims) == sorted(ds.dims)\n assert ds.sizes == ds.dims\n\n # These exact types aren't public API, but this makes sure we don't\n # change them inadvertently:\n assert isinstance(ds.dims, utils.Frozen)\n assert isinstance(ds.dims.mapping, utils.SortedKeysDict)\n assert type(ds.dims.mapping.mapping) is dict\n\n assert list(ds) == list(ds.data_vars)\n assert list(ds.keys()) == list(ds.data_vars)\n assert \"aasldfjalskdfj\" not in ds.variables\n assert \"dim1\" in repr(ds.variables)\n assert len(ds) == 3\n assert bool(ds)\n\n assert list(ds.data_vars) == [\"var1\", \"var2\", \"var3\"]\n assert list(ds.data_vars.keys()) == [\"var1\", \"var2\", \"var3\"]\n assert \"var1\" in ds.data_vars\n assert \"dim1\" not in ds.data_vars\n assert \"numbers\" not in ds.data_vars\n assert len(ds.data_vars) == 3\n\n assert set(ds.indexes) == {\"dim2\", \"dim3\", \"time\"}\n assert len(ds.indexes) == 3\n assert \"dim2\" in repr(ds.indexes)\n\n assert list(ds.coords) == [\"time\", \"dim2\", \"dim3\", \"numbers\"]\n assert \"dim2\" in ds.coords\n assert \"numbers\" in ds.coords\n assert \"var1\" not in ds.coords\n assert \"dim1\" not in ds.coords\n assert len(ds.coords) == 4\n\n assert Dataset({\"x\": np.int64(1), \"y\": np.float32([1, 2])}).nbytes == 16\n\n def test_asarray(self):\n ds = Dataset({\"x\": 0})\n with raises_regex(TypeError, \"cannot directly convert\"):\n np.asarray(ds)\n\n def test_get_index(self):\n ds = 
Dataset({\"foo\": ((\"x\", \"y\"), np.zeros((2, 3)))}, coords={\"x\": [\"a\", \"b\"]})\n assert ds.get_index(\"x\").equals(pd.Index([\"a\", \"b\"]))\n assert ds.get_index(\"y\").equals(pd.Index([0, 1, 2]))\n with pytest.raises(KeyError):\n ds.get_index(\"z\")\n\n def test_attr_access(self):\n ds = Dataset(\n {\"tmin\": (\"x\", [42], {\"units\": \"Celcius\"})}, attrs={\"title\": \"My test data\"}\n )\n assert_identical(ds.tmin, ds[\"tmin\"])\n assert_identical(ds.tmin.x, ds.x)\n\n assert ds.title == ds.attrs[\"title\"]\n assert ds.tmin.units == ds[\"tmin\"].attrs[\"units\"]\n\n assert {\"tmin\", \"title\"} <= set(dir(ds))\n assert \"units\" in set(dir(ds.tmin))\n\n # should defer to variable of same name\n ds.attrs[\"tmin\"] = -999\n assert ds.attrs[\"tmin\"] == -999\n assert_identical(ds.tmin, ds[\"tmin\"])\n\n def test_variable(self):\n a = Dataset()\n d = np.random.random((10, 3))\n a[\"foo\"] = ((\"time\", \"x\"), d)\n assert \"foo\" in a.variables\n assert \"foo\" in a\n a[\"bar\"] = ((\"time\", \"x\"), d)\n # order of creation is preserved\n assert list(a.variables) == [\"foo\", \"bar\"]\n assert_array_equal(a[\"foo\"].values, d)\n # try to add variable with dim (10,3) with data that's (3,10)\n with pytest.raises(ValueError):\n a[\"qux\"] = ((\"time\", \"x\"), d.T)\n\n def test_modify_inplace(self):\n a = Dataset()\n vec = np.random.random((10,))\n attributes = {\"foo\": \"bar\"}\n a[\"x\"] = (\"x\", vec, attributes)\n assert \"x\" in a.coords\n assert isinstance(a.coords[\"x\"].to_index(), pd.Index)\n assert_identical(a.coords[\"x\"].variable, a.variables[\"x\"])\n b = Dataset()\n b[\"x\"] = (\"x\", vec, attributes)\n assert_identical(a[\"x\"], b[\"x\"])\n assert a.dims == b.dims\n # this should work\n a[\"x\"] = (\"x\", vec[:5])\n a[\"z\"] = (\"x\", np.arange(5))\n with pytest.raises(ValueError):\n # now it shouldn't, since there is a conflicting length\n a[\"x\"] = (\"x\", vec[:4])\n arr = np.random.random((10, 1))\n scal = np.array(0)\n with pytest.raises(ValueError):\n a[\"y\"] = (\"y\", arr)\n with pytest.raises(ValueError):\n a[\"y\"] = (\"y\", scal)\n assert \"y\" not in a.dims\n\n def test_coords_properties(self):\n # use int64 for repr consistency on windows\n data = Dataset(\n {\n \"x\": (\"x\", np.array([-1, -2], \"int64\")),\n \"y\": (\"y\", np.array([0, 1, 2], \"int64\")),\n \"foo\": ([\"x\", \"y\"], np.random.randn(2, 3)),\n },\n {\"a\": (\"x\", np.array([4, 5], \"int64\")), \"b\": np.int64(-10)},\n )\n\n assert 4 == len(data.coords)\n\n assert [\"x\", \"y\", \"a\", \"b\"] == list(data.coords)\n\n assert_identical(data.coords[\"x\"].variable, data[\"x\"].variable)\n assert_identical(data.coords[\"y\"].variable, data[\"y\"].variable)\n\n assert \"x\" in data.coords\n assert \"a\" in data.coords\n assert 0 not in data.coords\n assert \"foo\" not in data.coords\n\n with pytest.raises(KeyError):\n data.coords[\"foo\"]\n with pytest.raises(KeyError):\n data.coords[0]\n\n expected = dedent(\n \"\"\"\\\n Coordinates:\n * x (x) int64 -1 -2\n * y (y) int64 0 1 2\n a (x) int64 4 5\n b int64 -10\"\"\"\n )\n actual = repr(data.coords)\n assert expected == actual\n\n assert {\"x\": 2, \"y\": 3} == data.coords.dims\n\n def test_coords_modify(self):\n data = Dataset(\n {\n \"x\": (\"x\", [-1, -2]),\n \"y\": (\"y\", [0, 1, 2]),\n \"foo\": ([\"x\", \"y\"], np.random.randn(2, 3)),\n },\n {\"a\": (\"x\", [4, 5]), \"b\": -10},\n )\n\n actual = data.copy(deep=True)\n actual.coords[\"x\"] = (\"x\", [\"a\", \"b\"])\n assert_array_equal(actual[\"x\"], [\"a\", \"b\"])\n\n actual = 
data.copy(deep=True)\n actual.coords[\"z\"] = (\"z\", [\"a\", \"b\"])\n assert_array_equal(actual[\"z\"], [\"a\", \"b\"])\n\n actual = data.copy(deep=True)\n with raises_regex(ValueError, \"conflicting sizes\"):\n actual.coords[\"x\"] = (\"x\", [-1])\n assert_identical(actual, data) # should not be modified\n\n actual = data.copy()\n del actual.coords[\"b\"]\n expected = data.reset_coords(\"b\", drop=True)\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError):\n del data.coords[\"not_found\"]\n\n with pytest.raises(KeyError):\n del data.coords[\"foo\"]\n\n actual = data.copy(deep=True)\n actual.coords.update({\"c\": 11})\n expected = data.merge({\"c\": 11}).set_coords(\"c\")\n assert_identical(expected, actual)\n\n # regression test for GH3746\n del actual.coords[\"x\"]\n assert \"x\" not in actual.indexes\n\n def test_update_index(self):\n actual = Dataset(coords={\"x\": [1, 2, 3]})\n actual[\"x\"] = [\"a\", \"b\", \"c\"]\n assert actual.indexes[\"x\"].equals(pd.Index([\"a\", \"b\", \"c\"]))\n\n def test_coords_setitem_with_new_dimension(self):\n actual = Dataset()\n actual.coords[\"foo\"] = (\"x\", [1, 2, 3])\n expected = Dataset(coords={\"foo\": (\"x\", [1, 2, 3])})\n assert_identical(expected, actual)\n\n def test_coords_setitem_multiindex(self):\n data = create_test_multiindex()\n with raises_regex(ValueError, \"conflicting MultiIndex\"):\n data.coords[\"level_1\"] = range(4)\n\n def test_coords_set(self):\n one_coord = Dataset({\"x\": (\"x\", [0]), \"yy\": (\"x\", [1]), \"zzz\": (\"x\", [2])})\n two_coords = Dataset({\"zzz\": (\"x\", [2])}, {\"x\": (\"x\", [0]), \"yy\": (\"x\", [1])})\n all_coords = Dataset(\n coords={\"x\": (\"x\", [0]), \"yy\": (\"x\", [1]), \"zzz\": (\"x\", [2])}\n )\n\n actual = one_coord.set_coords(\"x\")\n assert_identical(one_coord, actual)\n actual = one_coord.set_coords([\"x\"])\n assert_identical(one_coord, actual)\n\n actual = one_coord.set_coords(\"yy\")\n assert_identical(two_coords, actual)\n\n actual = one_coord.set_coords([\"yy\", \"zzz\"])\n assert_identical(all_coords, actual)\n\n actual = one_coord.reset_coords()\n assert_identical(one_coord, actual)\n actual = two_coords.reset_coords()\n assert_identical(one_coord, actual)\n actual = all_coords.reset_coords()\n assert_identical(one_coord, actual)\n\n actual = all_coords.reset_coords([\"yy\", \"zzz\"])\n assert_identical(one_coord, actual)\n actual = all_coords.reset_coords(\"zzz\")\n assert_identical(two_coords, actual)\n\n with raises_regex(ValueError, \"cannot remove index\"):\n one_coord.reset_coords(\"x\")\n\n actual = all_coords.reset_coords(\"zzz\", drop=True)\n expected = all_coords.drop_vars(\"zzz\")\n assert_identical(expected, actual)\n expected = two_coords.drop_vars(\"zzz\")\n assert_identical(expected, actual)\n\n def test_coords_to_dataset(self):\n orig = Dataset({\"foo\": (\"y\", [-1, 0, 1])}, {\"x\": 10, \"y\": [2, 3, 4]})\n expected = Dataset(coords={\"x\": 10, \"y\": [2, 3, 4]})\n actual = orig.coords.to_dataset()\n assert_identical(expected, actual)\n\n def test_coords_merge(self):\n orig_coords = Dataset(coords={\"a\": (\"x\", [1, 2]), \"x\": [0, 1]}).coords\n other_coords = Dataset(coords={\"b\": (\"x\", [\"a\", \"b\"]), \"x\": [0, 1]}).coords\n expected = Dataset(\n coords={\"a\": (\"x\", [1, 2]), \"b\": (\"x\", [\"a\", \"b\"]), \"x\": [0, 1]}\n )\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n actual = other_coords.merge(orig_coords)\n assert_identical(expected, actual)\n\n other_coords = Dataset(coords={\"x\": (\"x\", 
[\"a\"])}).coords\n with pytest.raises(MergeError):\n orig_coords.merge(other_coords)\n other_coords = Dataset(coords={\"x\": (\"x\", [\"a\", \"b\"])}).coords\n with pytest.raises(MergeError):\n orig_coords.merge(other_coords)\n other_coords = Dataset(coords={\"x\": (\"x\", [\"a\", \"b\", \"c\"])}).coords\n with pytest.raises(MergeError):\n orig_coords.merge(other_coords)\n\n other_coords = Dataset(coords={\"a\": (\"x\", [8, 9])}).coords\n expected = Dataset(coords={\"x\": range(2)})\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n actual = other_coords.merge(orig_coords)\n assert_identical(expected, actual)\n\n other_coords = Dataset(coords={\"x\": np.nan}).coords\n actual = orig_coords.merge(other_coords)\n assert_identical(orig_coords.to_dataset(), actual)\n actual = other_coords.merge(orig_coords)\n assert_identical(orig_coords.to_dataset(), actual)\n\n def test_coords_merge_mismatched_shape(self):\n orig_coords = Dataset(coords={\"a\": (\"x\", [1, 1])}).coords\n other_coords = Dataset(coords={\"a\": 1}).coords\n expected = orig_coords.to_dataset()\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n\n other_coords = Dataset(coords={\"a\": (\"y\", [1])}).coords\n expected = Dataset(coords={\"a\": ([\"x\", \"y\"], [[1], [1]])})\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n\n actual = other_coords.merge(orig_coords)\n assert_identical(expected.transpose(), actual)\n\n orig_coords = Dataset(coords={\"a\": (\"x\", [np.nan])}).coords\n other_coords = Dataset(coords={\"a\": np.nan}).coords\n expected = orig_coords.to_dataset()\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n\n def test_data_vars_properties(self):\n ds = Dataset()\n ds[\"foo\"] = ((\"x\",), [1.0])\n ds[\"bar\"] = 2.0\n\n assert set(ds.data_vars) == {\"foo\", \"bar\"}\n assert \"foo\" in ds.data_vars\n assert \"x\" not in ds.data_vars\n assert_identical(ds[\"foo\"], ds.data_vars[\"foo\"])\n\n expected = dedent(\n \"\"\"\\\n Data variables:\n foo (x) float64 1.0\n bar float64 2.0\"\"\"\n )\n actual = repr(ds.data_vars)\n assert expected == actual\n\n def test_equals_and_identical(self):\n data = create_test_data(seed=42)\n assert data.equals(data)\n assert data.identical(data)\n\n data2 = create_test_data(seed=42)\n data2.attrs[\"foobar\"] = \"baz\"\n assert data.equals(data2)\n assert not data.identical(data2)\n\n del data2[\"time\"]\n assert not data.equals(data2)\n\n data = create_test_data(seed=42).rename({\"var1\": None})\n assert data.equals(data)\n assert data.identical(data)\n\n data2 = data.reset_coords()\n assert not data2.equals(data)\n assert not data2.identical(data)\n\n def test_equals_failures(self):\n data = create_test_data()\n assert not data.equals(\"foo\")\n assert not data.identical(123)\n assert not data.broadcast_equals({1: 2})\n\n def test_broadcast_equals(self):\n data1 = Dataset(coords={\"x\": 0})\n data2 = Dataset(coords={\"x\": [0]})\n assert data1.broadcast_equals(data2)\n assert not data1.equals(data2)\n assert not data1.identical(data2)\n\n def test_attrs(self):\n data = create_test_data(seed=42)\n data.attrs = {\"foobar\": \"baz\"}\n assert data.attrs[\"foobar\"], \"baz\"\n assert isinstance(data.attrs, dict)\n\n @requires_dask\n def test_chunk(self):\n data = create_test_data()\n for v in data.variables.values():\n assert isinstance(v.data, np.ndarray)\n assert data.chunks == {}\n\n reblocked = data.chunk()\n for k, v in reblocked.variables.items():\n if k in 
reblocked.dims:\n assert isinstance(v.data, np.ndarray)\n else:\n assert isinstance(v.data, da.Array)\n\n expected_chunks = {\"dim1\": (8,), \"dim2\": (9,), \"dim3\": (10,)}\n assert reblocked.chunks == expected_chunks\n\n def get_dask_names(ds):\n return {k: v.data.name for k, v in ds.items()}\n\n orig_dask_names = get_dask_names(reblocked)\n\n reblocked = data.chunk({\"time\": 5, \"dim1\": 5, \"dim2\": 5, \"dim3\": 5})\n # time is not a dim in any of the data_vars, so it\n # doesn't get chunked\n expected_chunks = {\"dim1\": (5, 3), \"dim2\": (5, 4), \"dim3\": (5, 5)}\n assert reblocked.chunks == expected_chunks\n\n # make sure dask names change when rechunking by different amounts\n # regression test for GH3350\n new_dask_names = get_dask_names(reblocked)\n for k, v in new_dask_names.items():\n assert v != orig_dask_names[k]\n\n reblocked = data.chunk(expected_chunks)\n assert reblocked.chunks == expected_chunks\n\n # reblock on already blocked data\n orig_dask_names = get_dask_names(reblocked)\n reblocked = reblocked.chunk(expected_chunks)\n new_dask_names = get_dask_names(reblocked)\n assert reblocked.chunks == expected_chunks\n assert_identical(reblocked, data)\n # recuhnking with same chunk sizes should not change names\n for k, v in new_dask_names.items():\n assert v == orig_dask_names[k]\n\n with raises_regex(ValueError, \"some chunks\"):\n data.chunk({\"foo\": 10})\n\n @requires_dask\n def test_dask_is_lazy(self):\n store = InaccessibleVariableDataStore()\n create_test_data().dump_to_store(store)\n ds = open_dataset(store).chunk()\n\n with pytest.raises(UnexpectedDataAccess):\n ds.load()\n with pytest.raises(UnexpectedDataAccess):\n ds[\"var1\"].values\n\n # these should not raise UnexpectedDataAccess:\n ds.var1.data\n ds.isel(time=10)\n ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)\n ds.transpose()\n ds.mean()\n ds.fillna(0)\n ds.rename({\"dim1\": \"foobar\"})\n ds.set_coords(\"var1\")\n ds.drop_vars(\"var1\")\n\n def test_isel(self):\n data = create_test_data()\n slicers = {\"dim1\": slice(None, None, 2), \"dim2\": slice(0, 2)}\n ret = data.isel(**slicers)\n\n # Verify that only the specified dimension was altered\n assert list(data.dims) == list(ret.dims)\n for d in data.dims:\n if d in slicers:\n assert ret.dims[d] == np.arange(data.dims[d])[slicers[d]].size\n else:\n assert data.dims[d] == ret.dims[d]\n # Verify that the data is what we expect\n for v in data.variables:\n assert data[v].dims == ret[v].dims\n assert data[v].attrs == ret[v].attrs\n slice_list = [slice(None)] * data[v].values.ndim\n for d, s in slicers.items():\n if d in data[v].dims:\n inds = np.nonzero(np.array(data[v].dims) == d)[0]\n for ind in inds:\n slice_list[ind] = s\n expected = data[v].values[tuple(slice_list)]\n actual = ret[v].values\n np.testing.assert_array_equal(expected, actual)\n\n with pytest.raises(ValueError):\n data.isel(not_a_dim=slice(0, 2))\n with raises_regex(\n ValueError,\n r\"dimensions {'not_a_dim'} do not exist. Expected \"\n r\"one or more of \"\n r\"[\\w\\W]*'time'[\\w\\W]*'dim\\d'[\\w\\W]*'dim\\d'[\\w\\W]*'dim\\d'[\\w\\W]*\",\n ):\n data.isel(not_a_dim=slice(0, 2))\n with pytest.warns(\n UserWarning,\n match=r\"dimensions {'not_a_dim'} do not exist. 
\"\n r\"Expected one or more of \"\n r\"[\\w\\W]*'time'[\\w\\W]*'dim\\d'[\\w\\W]*'dim\\d'[\\w\\W]*'dim\\d'[\\w\\W]*\",\n ):\n data.isel(not_a_dim=slice(0, 2), missing_dims=\"warn\")\n assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims=\"ignore\"))\n\n ret = data.isel(dim1=0)\n assert {\"time\": 20, \"dim2\": 9, \"dim3\": 10} == ret.dims\n assert set(data.data_vars) == set(ret.data_vars)\n assert set(data.coords) == set(ret.coords)\n assert set(data.indexes) == set(ret.indexes)\n\n ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))\n assert {\"time\": 2, \"dim2\": 5, \"dim3\": 10} == ret.dims\n assert set(data.data_vars) == set(ret.data_vars)\n assert set(data.coords) == set(ret.coords)\n assert set(data.indexes) == set(ret.indexes)\n\n ret = data.isel(time=0, dim1=0, dim2=slice(5))\n assert {\"dim2\": 5, \"dim3\": 10} == ret.dims\n assert set(data.data_vars) == set(ret.data_vars)\n assert set(data.coords) == set(ret.coords)\n assert set(data.indexes) == set(list(ret.indexes) + [\"time\"])\n\n def test_isel_fancy(self):\n # isel with fancy indexing.\n data = create_test_data()\n\n pdim1 = [1, 2, 3]\n pdim2 = [4, 5, 1]\n pdim3 = [1, 2, 3]\n actual = data.isel(\n dim1=((\"test_coord\",), pdim1),\n dim2=((\"test_coord\",), pdim2),\n dim3=((\"test_coord\",), pdim3),\n )\n assert \"test_coord\" in actual.dims\n assert actual.coords[\"test_coord\"].shape == (len(pdim1),)\n\n # Should work with DataArray\n actual = data.isel(\n dim1=DataArray(pdim1, dims=\"test_coord\"),\n dim2=((\"test_coord\",), pdim2),\n dim3=((\"test_coord\",), pdim3),\n )\n assert \"test_coord\" in actual.dims\n assert actual.coords[\"test_coord\"].shape == (len(pdim1),)\n expected = data.isel(\n dim1=((\"test_coord\",), pdim1),\n dim2=((\"test_coord\",), pdim2),\n dim3=((\"test_coord\",), pdim3),\n )\n assert_identical(actual, expected)\n\n # DataArray with coordinate\n idx1 = DataArray(pdim1, dims=[\"a\"], coords={\"a\": np.random.randn(3)})\n idx2 = DataArray(pdim2, dims=[\"b\"], coords={\"b\": np.random.randn(3)})\n idx3 = DataArray(pdim3, dims=[\"c\"], coords={\"c\": np.random.randn(3)})\n # Should work with DataArray\n actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)\n assert \"a\" in actual.dims\n assert \"b\" in actual.dims\n assert \"c\" in actual.dims\n assert \"time\" in actual.coords\n assert \"dim2\" in actual.coords\n assert \"dim3\" in actual.coords\n expected = data.isel(\n dim1=((\"a\",), pdim1), dim2=((\"b\",), pdim2), dim3=((\"c\",), pdim3)\n )\n expected = expected.assign_coords(a=idx1[\"a\"], b=idx2[\"b\"], c=idx3[\"c\"])\n assert_identical(actual, expected)\n\n idx1 = DataArray(pdim1, dims=[\"a\"], coords={\"a\": np.random.randn(3)})\n idx2 = DataArray(pdim2, dims=[\"a\"])\n idx3 = DataArray(pdim3, dims=[\"a\"])\n # Should work with DataArray\n actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)\n assert \"a\" in actual.dims\n assert \"time\" in actual.coords\n assert \"dim2\" in actual.coords\n assert \"dim3\" in actual.coords\n expected = data.isel(\n dim1=((\"a\",), pdim1), dim2=((\"a\",), pdim2), dim3=((\"a\",), pdim3)\n )\n expected = expected.assign_coords(a=idx1[\"a\"])\n assert_identical(actual, expected)\n\n actual = data.isel(dim1=((\"points\",), pdim1), dim2=((\"points\",), pdim2))\n assert \"points\" in actual.dims\n assert \"dim3\" in actual.dims\n assert \"dim3\" not in actual.data_vars\n np.testing.assert_array_equal(data[\"dim2\"][pdim2], actual[\"dim2\"])\n\n # test that the order of the indexers doesn't matter\n assert_identical(\n data.isel(dim1=((\"points\",), 
pdim1), dim2=((\"points\",), pdim2)),\n data.isel(dim2=((\"points\",), pdim2), dim1=((\"points\",), pdim1)),\n )\n # make sure we're raising errors in the right places\n with raises_regex(IndexError, \"Dimensions of indexers mismatch\"):\n data.isel(dim1=((\"points\",), [1, 2]), dim2=((\"points\",), [1, 2, 3]))\n with raises_regex(TypeError, \"cannot use a Dataset\"):\n data.isel(dim1=Dataset({\"points\": [1, 2]}))\n\n # test to be sure we keep around variables that were not indexed\n ds = Dataset({\"x\": [1, 2, 3, 4], \"y\": 0})\n actual = ds.isel(x=((\"points\",), [0, 1, 2]))\n assert_identical(ds[\"y\"], actual[\"y\"])\n\n # tests using index or DataArray as indexers\n stations = Dataset()\n stations[\"station\"] = ((\"station\",), [\"A\", \"B\", \"C\"])\n stations[\"dim1s\"] = ((\"station\",), [1, 2, 3])\n stations[\"dim2s\"] = ((\"station\",), [4, 5, 1])\n\n actual = data.isel(dim1=stations[\"dim1s\"], dim2=stations[\"dim2s\"])\n assert \"station\" in actual.coords\n assert \"station\" in actual.dims\n assert_identical(actual[\"station\"].drop_vars([\"dim2\"]), stations[\"station\"])\n\n with raises_regex(ValueError, \"conflicting values for \"):\n data.isel(\n dim1=DataArray(\n [0, 1, 2], dims=\"station\", coords={\"station\": [0, 1, 2]}\n ),\n dim2=DataArray(\n [0, 1, 2], dims=\"station\", coords={\"station\": [0, 1, 3]}\n ),\n )\n\n # multi-dimensional selection\n stations = Dataset()\n stations[\"a\"] = ((\"a\",), [\"A\", \"B\", \"C\"])\n stations[\"b\"] = ((\"b\",), [0, 1])\n stations[\"dim1s\"] = ((\"a\", \"b\"), [[1, 2], [2, 3], [3, 4]])\n stations[\"dim2s\"] = ((\"a\",), [4, 5, 1])\n actual = data.isel(dim1=stations[\"dim1s\"], dim2=stations[\"dim2s\"])\n assert \"a\" in actual.coords\n assert \"a\" in actual.dims\n assert \"b\" in actual.coords\n assert \"b\" in actual.dims\n assert \"dim2\" in actual.coords\n assert \"a\" in actual[\"dim2\"].dims\n\n assert_identical(actual[\"a\"].drop_vars([\"dim2\"]), stations[\"a\"])\n assert_identical(actual[\"b\"], stations[\"b\"])\n expected_var1 = data[\"var1\"].variable[\n stations[\"dim1s\"].variable, stations[\"dim2s\"].variable\n ]\n expected_var2 = data[\"var2\"].variable[\n stations[\"dim1s\"].variable, stations[\"dim2s\"].variable\n ]\n expected_var3 = data[\"var3\"].variable[slice(None), stations[\"dim1s\"].variable]\n assert_equal(actual[\"a\"].drop_vars(\"dim2\"), stations[\"a\"])\n assert_array_equal(actual[\"var1\"], expected_var1)\n assert_array_equal(actual[\"var2\"], expected_var2)\n assert_array_equal(actual[\"var3\"], expected_var3)\n\n def test_isel_dataarray(self):\n \"\"\" Test for indexing by DataArray \"\"\"\n data = create_test_data()\n # indexing with DataArray with same-name coordinates.\n indexing_da = DataArray(\n np.arange(1, 4), dims=[\"dim1\"], coords={\"dim1\": np.random.randn(3)}\n )\n actual = data.isel(dim1=indexing_da)\n assert_identical(indexing_da[\"dim1\"], actual[\"dim1\"])\n assert_identical(data[\"dim2\"], actual[\"dim2\"])\n\n # Conflict in the dimension coordinate\n indexing_da = DataArray(\n np.arange(1, 4), dims=[\"dim2\"], coords={\"dim2\": np.random.randn(3)}\n )\n with raises_regex(IndexError, \"dimension coordinate 'dim2'\"):\n actual = data.isel(dim2=indexing_da)\n # Also the case for DataArray\n with raises_regex(IndexError, \"dimension coordinate 'dim2'\"):\n actual = data[\"var2\"].isel(dim2=indexing_da)\n with raises_regex(IndexError, \"dimension coordinate 'dim2'\"):\n data[\"dim2\"].isel(dim2=indexing_da)\n\n # same name coordinate which does not conflict\n indexing_da = 
DataArray(\n np.arange(1, 4), dims=[\"dim2\"], coords={\"dim2\": data[\"dim2\"].values[1:4]}\n )\n actual = data.isel(dim2=indexing_da)\n assert_identical(actual[\"dim2\"], indexing_da[\"dim2\"])\n\n # Silently drop conflicted (non-dimensional) coordinate of indexer\n indexing_da = DataArray(\n np.arange(1, 4),\n dims=[\"dim2\"],\n coords={\n \"dim2\": data[\"dim2\"].values[1:4],\n \"numbers\": (\"dim2\", np.arange(2, 5)),\n },\n )\n actual = data.isel(dim2=indexing_da)\n assert_identical(actual[\"numbers\"], data[\"numbers\"])\n\n # boolean data array with coordinate with the same name\n indexing_da = DataArray(\n np.arange(1, 10), dims=[\"dim2\"], coords={\"dim2\": data[\"dim2\"].values}\n )\n indexing_da = indexing_da < 3\n actual = data.isel(dim2=indexing_da)\n assert_identical(actual[\"dim2\"], data[\"dim2\"][:2])\n\n # boolean data array with non-dimensioncoordinate\n indexing_da = DataArray(\n np.arange(1, 10),\n dims=[\"dim2\"],\n coords={\n \"dim2\": data[\"dim2\"].values,\n \"non_dim\": ((\"dim2\",), np.random.randn(9)),\n \"non_dim2\": 0,\n },\n )\n indexing_da = indexing_da < 3\n actual = data.isel(dim2=indexing_da)\n assert_identical(\n actual[\"dim2\"].drop_vars(\"non_dim\").drop_vars(\"non_dim2\"), data[\"dim2\"][:2]\n )\n assert_identical(actual[\"non_dim\"], indexing_da[\"non_dim\"][:2])\n assert_identical(actual[\"non_dim2\"], indexing_da[\"non_dim2\"])\n\n # non-dimension coordinate will be also attached\n indexing_da = DataArray(\n np.arange(1, 4),\n dims=[\"dim2\"],\n coords={\"non_dim\": ((\"dim2\",), np.random.randn(3))},\n )\n actual = data.isel(dim2=indexing_da)\n assert \"non_dim\" in actual\n assert \"non_dim\" in actual.coords\n\n # Index by a scalar DataArray\n indexing_da = DataArray(3, dims=[], coords={\"station\": 2})\n actual = data.isel(dim2=indexing_da)\n assert \"station\" in actual\n actual = data.isel(dim2=indexing_da[\"station\"])\n assert \"station\" in actual\n\n # indexer generated from coordinates\n indexing_ds = Dataset({}, coords={\"dim2\": [0, 1, 2]})\n with raises_regex(IndexError, \"dimension coordinate 'dim2'\"):\n actual = data.isel(dim2=indexing_ds[\"dim2\"])\n\n def test_sel(self):\n data = create_test_data()\n int_slicers = {\"dim1\": slice(None, None, 2), \"dim2\": slice(2), \"dim3\": slice(3)}\n loc_slicers = {\n \"dim1\": slice(None, None, 2),\n \"dim2\": slice(0, 0.5),\n \"dim3\": slice(\"a\", \"c\"),\n }\n assert_equal(data.isel(**int_slicers), data.sel(**loc_slicers))\n data[\"time\"] = (\"time\", pd.date_range(\"2000-01-01\", periods=20))\n assert_equal(data.isel(time=0), data.sel(time=\"2000-01-01\"))\n assert_equal(\n data.isel(time=slice(10)), data.sel(time=slice(\"2000-01-01\", \"2000-01-10\"))\n )\n assert_equal(data, data.sel(time=slice(\"1999\", \"2005\")))\n times = pd.date_range(\"2000-01-01\", periods=3)\n assert_equal(data.isel(time=slice(3)), data.sel(time=times))\n assert_equal(\n data.isel(time=slice(3)), data.sel(time=(data[\"time.dayofyear\"] <= 3))\n )\n\n td = pd.to_timedelta(np.arange(3), unit=\"days\")\n data = Dataset({\"x\": (\"td\", np.arange(3)), \"td\": td})\n assert_equal(data, data.sel(td=td))\n assert_equal(data, data.sel(td=slice(\"3 days\")))\n assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta(\"0 days\")))\n assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta(\"0h\")))\n assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice(\"1 days\", \"2 days\")))\n\n def test_sel_dataarray(self):\n data = create_test_data()\n\n ind = DataArray([0.0, 0.5, 1.0], dims=[\"dim2\"])\n actual = 
data.sel(dim2=ind)\n assert_equal(actual, data.isel(dim2=[0, 1, 2]))\n\n # with different dimension\n ind = DataArray([0.0, 0.5, 1.0], dims=[\"new_dim\"])\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=Variable(\"new_dim\", [0, 1, 2]))\n assert \"new_dim\" in actual.dims\n assert_equal(actual, expected)\n\n # Multi-dimensional\n ind = DataArray([[0.0], [0.5], [1.0]], dims=[\"new_dim\", \"new_dim2\"])\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=Variable((\"new_dim\", \"new_dim2\"), [[0], [1], [2]]))\n assert \"new_dim\" in actual.dims\n assert \"new_dim2\" in actual.dims\n assert_equal(actual, expected)\n\n # with coordinate\n ind = DataArray(\n [0.0, 0.5, 1.0], dims=[\"new_dim\"], coords={\"new_dim\": [\"a\", \"b\", \"c\"]}\n )\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=[0, 1, 2]).rename({\"dim2\": \"new_dim\"})\n assert \"new_dim\" in actual.dims\n assert \"new_dim\" in actual.coords\n assert_equal(\n actual.drop_vars(\"new_dim\").drop_vars(\"dim2\"), expected.drop_vars(\"new_dim\")\n )\n assert_equal(actual[\"new_dim\"].drop_vars(\"dim2\"), ind[\"new_dim\"])\n\n # with conflicted coordinate (silently ignored)\n ind = DataArray(\n [0.0, 0.5, 1.0], dims=[\"dim2\"], coords={\"dim2\": [\"a\", \"b\", \"c\"]}\n )\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=[0, 1, 2])\n assert_equal(actual, expected)\n\n # with conflicted coordinate (silently ignored)\n ind = DataArray(\n [0.0, 0.5, 1.0],\n dims=[\"new_dim\"],\n coords={\"new_dim\": [\"a\", \"b\", \"c\"], \"dim2\": 3},\n )\n actual = data.sel(dim2=ind)\n assert_equal(\n actual[\"new_dim\"].drop_vars(\"dim2\"), ind[\"new_dim\"].drop_vars(\"dim2\")\n )\n expected = data.isel(dim2=[0, 1, 2])\n expected[\"dim2\"] = ((\"new_dim\"), expected[\"dim2\"].values)\n assert_equal(actual[\"dim2\"].drop_vars(\"new_dim\"), expected[\"dim2\"])\n assert actual[\"var1\"].dims == (\"dim1\", \"new_dim\")\n\n # with non-dimensional coordinate\n ind = DataArray(\n [0.0, 0.5, 1.0],\n dims=[\"dim2\"],\n coords={\n \"dim2\": [\"a\", \"b\", \"c\"],\n \"numbers\": (\"dim2\", [0, 1, 2]),\n \"new_dim\": (\"dim2\", [1.1, 1.2, 1.3]),\n },\n )\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=[0, 1, 2])\n assert_equal(actual.drop_vars(\"new_dim\"), expected)\n assert np.allclose(actual[\"new_dim\"].values, ind[\"new_dim\"].values)\n\n def test_sel_dataarray_mindex(self):\n midx = pd.MultiIndex.from_product([list(\"abc\"), [0, 1]], names=(\"one\", \"two\"))\n mds = xr.Dataset(\n {\"var\": ((\"x\", \"y\"), np.random.rand(6, 3))},\n coords={\"x\": midx, \"y\": range(3)},\n )\n\n actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims=\"x\"))\n actual_sel = mds.sel(x=DataArray(mds.indexes[\"x\"][:3], dims=\"x\"))\n assert actual_isel[\"x\"].dims == (\"x\",)\n assert actual_sel[\"x\"].dims == (\"x\",)\n assert_identical(actual_isel, actual_sel)\n\n actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims=\"z\"))\n actual_sel = mds.sel(x=Variable(\"z\", mds.indexes[\"x\"][:3]))\n assert actual_isel[\"x\"].dims == (\"z\",)\n assert actual_sel[\"x\"].dims == (\"z\",)\n assert_identical(actual_isel, actual_sel)\n\n # with coordinate\n actual_isel = mds.isel(\n x=xr.DataArray(np.arange(3), dims=\"z\", coords={\"z\": [0, 1, 2]})\n )\n actual_sel = mds.sel(\n x=xr.DataArray(mds.indexes[\"x\"][:3], dims=\"z\", coords={\"z\": [0, 1, 2]})\n )\n assert actual_isel[\"x\"].dims == (\"z\",)\n assert actual_sel[\"x\"].dims == (\"z\",)\n assert_identical(actual_isel, actual_sel)\n\n # Vectorized indexing with level-variables raises an 
error\n with raises_regex(ValueError, \"Vectorized selection is \"):\n mds.sel(one=[\"a\", \"b\"])\n\n with raises_regex(\n ValueError,\n \"Vectorized selection is not available along MultiIndex variable: x\",\n ):\n mds.sel(\n x=xr.DataArray(\n [np.array(midx[:2]), np.array(midx[-2:])], dims=[\"a\", \"b\"]\n )\n )\n\n def test_sel_categorical(self):\n ind = pd.Series([\"foo\", \"bar\"], dtype=\"category\")\n df = pd.DataFrame({\"ind\": ind, \"values\": [1, 2]})\n ds = df.set_index(\"ind\").to_xarray()\n actual = ds.sel(ind=\"bar\")\n expected = ds.isel(ind=1)\n assert_identical(expected, actual)\n\n def test_sel_categorical_error(self):\n ind = pd.Series([\"foo\", \"bar\"], dtype=\"category\")\n df = pd.DataFrame({\"ind\": ind, \"values\": [1, 2]})\n ds = df.set_index(\"ind\").to_xarray()\n with pytest.raises(ValueError):\n ds.sel(ind=\"bar\", method=\"nearest\")\n with pytest.raises(ValueError):\n ds.sel(ind=\"bar\", tolerance=\"nearest\")\n\n def test_categorical_index(self):\n cat = pd.CategoricalIndex(\n [\"foo\", \"bar\", \"foo\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\", \"quux\", \"corge\"],\n )\n ds = xr.Dataset(\n {\"var\": (\"cat\", np.arange(3))},\n coords={\"cat\": (\"cat\", cat), \"c\": (\"cat\", [0, 1, 1])},\n )\n # test slice\n actual = ds.sel(cat=\"foo\")\n expected = ds.isel(cat=[0, 2])\n assert_identical(expected, actual)\n # make sure the conversion to the array works\n actual = ds.sel(cat=\"foo\")[\"cat\"].values\n assert (actual == np.array([\"foo\", \"foo\"])).all()\n\n ds = ds.set_index(index=[\"cat\", \"c\"])\n actual = ds.unstack(\"index\")\n assert actual[\"var\"].shape == (2, 2)\n\n def test_categorical_reindex(self):\n cat = pd.CategoricalIndex(\n [\"foo\", \"bar\", \"baz\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\", \"quux\", \"corge\"],\n )\n ds = xr.Dataset(\n {\"var\": (\"cat\", np.arange(3))},\n coords={\"cat\": (\"cat\", cat), \"c\": (\"cat\", [0, 1, 2])},\n )\n actual = ds.reindex(cat=[\"foo\"])[\"cat\"].values\n assert (actual == np.array([\"foo\"])).all()\n\n def test_categorical_multiindex(self):\n i1 = pd.Series([0, 0])\n cat = pd.CategoricalDtype(categories=[\"foo\", \"baz\", \"bar\"])\n i2 = pd.Series([\"baz\", \"bar\"], dtype=cat)\n\n df = pd.DataFrame({\"i1\": i1, \"i2\": i2, \"values\": [1, 2]}).set_index(\n [\"i1\", \"i2\"]\n )\n actual = df.to_xarray()\n assert actual[\"values\"].shape == (1, 2)\n\n def test_sel_drop(self):\n data = Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [0, 1, 2]})\n expected = Dataset({\"foo\": 1})\n selected = data.sel(x=0, drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": 1}, {\"x\": 0})\n selected = data.sel(x=0, drop=False)\n assert_identical(expected, selected)\n\n data = Dataset({\"foo\": (\"x\", [1, 2, 3])})\n expected = Dataset({\"foo\": 1})\n selected = data.sel(x=0, drop=True)\n assert_identical(expected, selected)\n\n def test_isel_drop(self):\n data = Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [0, 1, 2]})\n expected = Dataset({\"foo\": 1})\n selected = data.isel(x=0, drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": 1}, {\"x\": 0})\n selected = data.isel(x=0, drop=False)\n assert_identical(expected, selected)\n\n def test_head(self):\n data = create_test_data()\n\n expected = data.isel(time=slice(5), dim2=slice(6))\n actual = data.head(time=5, dim2=6)\n assert_equal(expected, actual)\n\n expected = data.isel(time=slice(0))\n actual = data.head(time=0)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: 
slice(6) for dim in data.dims})\n actual = data.head(6)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(5) for dim in data.dims})\n actual = data.head()\n assert_equal(expected, actual)\n\n with raises_regex(TypeError, \"either dict-like or a single int\"):\n data.head([3])\n with raises_regex(TypeError, \"expected integer type\"):\n data.head(dim2=3.1)\n with raises_regex(ValueError, \"expected positive int\"):\n data.head(time=-3)\n\n def test_tail(self):\n data = create_test_data()\n\n expected = data.isel(time=slice(-5, None), dim2=slice(-6, None))\n actual = data.tail(time=5, dim2=6)\n assert_equal(expected, actual)\n\n expected = data.isel(dim1=slice(0))\n actual = data.tail(dim1=0)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(-6, None) for dim in data.dims})\n actual = data.tail(6)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(-5, None) for dim in data.dims})\n actual = data.tail()\n assert_equal(expected, actual)\n\n with raises_regex(TypeError, \"either dict-like or a single int\"):\n data.tail([3])\n with raises_regex(TypeError, \"expected integer type\"):\n data.tail(dim2=3.1)\n with raises_regex(ValueError, \"expected positive int\"):\n data.tail(time=-3)\n\n def test_thin(self):\n data = create_test_data()\n\n expected = data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6))\n actual = data.thin(time=5, dim2=6)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(None, None, 6) for dim in data.dims})\n actual = data.thin(6)\n assert_equal(expected, actual)\n\n with raises_regex(TypeError, \"either dict-like or a single int\"):\n data.thin([3])\n with raises_regex(TypeError, \"expected integer type\"):\n data.thin(dim2=3.1)\n with raises_regex(ValueError, \"cannot be zero\"):\n data.thin(time=0)\n with raises_regex(ValueError, \"expected positive int\"):\n data.thin(time=-3)\n\n @pytest.mark.filterwarnings(\"ignore::DeprecationWarning\")\n def test_sel_fancy(self):\n data = create_test_data()\n\n # add in a range() index\n data[\"dim1\"] = data.dim1\n\n pdim1 = [1, 2, 3]\n pdim2 = [4, 5, 1]\n pdim3 = [1, 2, 3]\n expected = data.isel(\n dim1=Variable((\"test_coord\",), pdim1),\n dim2=Variable((\"test_coord\",), pdim2),\n dim3=Variable((\"test_coord\"), pdim3),\n )\n actual = data.sel(\n dim1=Variable((\"test_coord\",), data.dim1[pdim1]),\n dim2=Variable((\"test_coord\",), data.dim2[pdim2]),\n dim3=Variable((\"test_coord\",), data.dim3[pdim3]),\n )\n assert_identical(expected, actual)\n\n # DataArray Indexer\n idx_t = DataArray(\n data[\"time\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n idx_2 = DataArray(\n data[\"dim2\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n idx_3 = DataArray(\n data[\"dim3\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)\n expected = data.isel(\n time=Variable((\"a\",), [3, 2, 1]),\n dim2=Variable((\"a\",), [3, 2, 1]),\n dim3=Variable((\"a\",), [3, 2, 1]),\n )\n expected = expected.assign_coords(a=idx_t[\"a\"])\n assert_identical(expected, actual)\n\n idx_t = DataArray(\n data[\"time\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n idx_2 = DataArray(\n data[\"dim2\"][[2, 1, 3]].values, dims=[\"b\"], coords={\"b\": [0, 1, 2]}\n )\n idx_3 = DataArray(\n data[\"dim3\"][[1, 2, 1]].values, dims=[\"c\"], coords={\"c\": [0.0, 1.1, 2.2]}\n )\n actual = data.sel(time=idx_t, dim2=idx_2, 
dim3=idx_3)\n expected = data.isel(\n time=Variable((\"a\",), [3, 2, 1]),\n dim2=Variable((\"b\",), [2, 1, 3]),\n dim3=Variable((\"c\",), [1, 2, 1]),\n )\n expected = expected.assign_coords(a=idx_t[\"a\"], b=idx_2[\"b\"], c=idx_3[\"c\"])\n assert_identical(expected, actual)\n\n # test from sel_points\n data = Dataset({\"foo\": ((\"x\", \"y\"), np.arange(9).reshape(3, 3))})\n data.coords.update({\"x\": [0, 1, 2], \"y\": [0, 1, 2]})\n\n expected = Dataset(\n {\"foo\": (\"points\", [0, 4, 8])},\n coords={\n \"x\": Variable((\"points\",), [0, 1, 2]),\n \"y\": Variable((\"points\",), [0, 1, 2]),\n },\n )\n actual = data.sel(\n x=Variable((\"points\",), [0, 1, 2]), y=Variable((\"points\",), [0, 1, 2])\n )\n assert_identical(expected, actual)\n\n expected.coords.update({\"x\": (\"points\", [0, 1, 2]), \"y\": (\"points\", [0, 1, 2])})\n actual = data.sel(\n x=Variable((\"points\",), [0.1, 1.1, 2.5]),\n y=Variable((\"points\",), [0, 1.2, 2.0]),\n method=\"pad\",\n )\n assert_identical(expected, actual)\n\n idx_x = DataArray([0, 1, 2], dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]})\n idx_y = DataArray([0, 2, 1], dims=[\"b\"], coords={\"b\": [0, 3, 6]})\n expected_ary = data[\"foo\"][[0, 1, 2], [0, 2, 1]]\n actual = data.sel(x=idx_x, y=idx_y)\n assert_array_equal(expected_ary, actual[\"foo\"])\n assert_identical(actual[\"a\"].drop_vars(\"x\"), idx_x[\"a\"])\n assert_identical(actual[\"b\"].drop_vars(\"y\"), idx_y[\"b\"])\n\n with pytest.raises(KeyError):\n data.sel(x=[2.5], y=[2.0], method=\"pad\", tolerance=1e-3)\n\n def test_sel_method(self):\n data = create_test_data()\n\n expected = data.sel(dim2=1)\n actual = data.sel(dim2=0.95, method=\"nearest\")\n assert_identical(expected, actual)\n\n actual = data.sel(dim2=0.95, method=\"nearest\", tolerance=1)\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError):\n actual = data.sel(dim2=np.pi, method=\"nearest\", tolerance=0)\n\n expected = data.sel(dim2=[1.5])\n actual = data.sel(dim2=[1.45], method=\"backfill\")\n assert_identical(expected, actual)\n\n with raises_regex(NotImplementedError, \"slice objects\"):\n data.sel(dim2=slice(1, 3), method=\"ffill\")\n\n with raises_regex(TypeError, \"``method``\"):\n # this should not pass silently\n data.sel(method=data)\n\n # cannot pass method if there is no associated coordinate\n with raises_regex(ValueError, \"cannot supply\"):\n data.sel(dim1=0, method=\"nearest\")\n\n def test_loc(self):\n data = create_test_data()\n expected = data.sel(dim3=\"a\")\n actual = data.loc[dict(dim3=\"a\")]\n assert_identical(expected, actual)\n with raises_regex(TypeError, \"can only lookup dict\"):\n data.loc[\"a\"]\n with pytest.raises(TypeError):\n data.loc[dict(dim3=\"a\")] = 0\n\n def test_selection_multiindex(self):\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2], [-1, -2]], names=(\"one\", \"two\", \"three\")\n )\n mdata = Dataset(data_vars={\"var\": (\"x\", range(8))}, coords={\"x\": mindex})\n\n def test_sel(lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None):\n ds = mdata.sel(x=lab_indexer)\n expected_ds = mdata.isel(x=pos_indexer)\n if not replaced_idx:\n assert_identical(ds, expected_ds)\n else:\n if renamed_dim:\n assert ds[\"var\"].dims[0] == renamed_dim\n ds = ds.rename({renamed_dim: \"x\"})\n assert_identical(ds[\"var\"].variable, expected_ds[\"var\"].variable)\n assert not ds[\"x\"].equals(expected_ds[\"x\"])\n\n test_sel((\"a\", 1, -1), 0)\n test_sel((\"b\", 2, -2), -1)\n test_sel((\"a\", 1), [0, 1], replaced_idx=True, renamed_dim=\"three\")\n 
test_sel((\"a\",), range(4), replaced_idx=True)\n test_sel(\"a\", range(4), replaced_idx=True)\n test_sel([(\"a\", 1, -1), (\"b\", 2, -2)], [0, 7])\n test_sel(slice(\"a\", \"b\"), range(8))\n test_sel(slice((\"a\", 1), (\"b\", 1)), range(6))\n test_sel({\"one\": \"a\", \"two\": 1, \"three\": -1}, 0)\n test_sel({\"one\": \"a\", \"two\": 1}, [0, 1], replaced_idx=True, renamed_dim=\"three\")\n test_sel({\"one\": \"a\"}, range(4), replaced_idx=True)\n\n assert_identical(mdata.loc[{\"x\": {\"one\": \"a\"}}], mdata.sel(x={\"one\": \"a\"}))\n assert_identical(mdata.loc[{\"x\": \"a\"}], mdata.sel(x=\"a\"))\n assert_identical(mdata.loc[{\"x\": (\"a\", 1)}], mdata.sel(x=(\"a\", 1)))\n assert_identical(mdata.loc[{\"x\": (\"a\", 1, -1)}], mdata.sel(x=(\"a\", 1, -1)))\n\n assert_identical(mdata.sel(x={\"one\": \"a\", \"two\": 1}), mdata.sel(one=\"a\", two=1))\n\n def test_broadcast_like(self):\n original1 = DataArray(\n np.random.randn(5), [(\"x\", range(5))], name=\"a\"\n ).to_dataset()\n\n original2 = DataArray(np.random.randn(6), [(\"y\", range(6))], name=\"b\")\n\n expected1, expected2 = broadcast(original1, original2)\n\n assert_identical(\n original1.broadcast_like(original2), expected1.transpose(\"y\", \"x\")\n )\n\n assert_identical(original2.broadcast_like(original1), expected2)\n\n def test_reindex_like(self):\n data = create_test_data()\n data[\"letters\"] = (\"dim3\", 10 * [\"a\"])\n\n expected = data.isel(dim1=slice(10), time=slice(13))\n actual = data.reindex_like(expected)\n assert_identical(actual, expected)\n\n expected = data.copy(deep=True)\n expected[\"dim3\"] = (\"dim3\", list(\"cdefghijkl\"))\n expected[\"var3\"][:-2] = expected[\"var3\"][2:].values\n expected[\"var3\"][-2:] = np.nan\n expected[\"letters\"] = expected[\"letters\"].astype(object)\n expected[\"letters\"][-2:] = np.nan\n expected[\"numbers\"] = expected[\"numbers\"].astype(float)\n expected[\"numbers\"][:-2] = expected[\"numbers\"][2:].values\n expected[\"numbers\"][-2:] = np.nan\n actual = data.reindex_like(expected)\n assert_identical(actual, expected)\n\n def test_reindex(self):\n data = create_test_data()\n assert_identical(data, data.reindex())\n\n expected = data.assign_coords(dim1=data[\"dim1\"])\n actual = data.reindex(dim1=data[\"dim1\"])\n assert_identical(actual, expected)\n\n actual = data.reindex(dim1=data[\"dim1\"].values)\n assert_identical(actual, expected)\n\n actual = data.reindex(dim1=data[\"dim1\"].to_index())\n assert_identical(actual, expected)\n\n with raises_regex(ValueError, \"cannot reindex or align along dimension\"):\n data.reindex(dim1=data[\"dim1\"][:5])\n\n expected = data.isel(dim2=slice(5))\n actual = data.reindex(dim2=data[\"dim2\"][:5])\n assert_identical(actual, expected)\n\n # test dict-like argument\n actual = data.reindex({\"dim2\": data[\"dim2\"]})\n expected = data\n assert_identical(actual, expected)\n with raises_regex(ValueError, \"cannot specify both\"):\n data.reindex({\"x\": 0}, x=0)\n with raises_regex(ValueError, \"dictionary\"):\n data.reindex(\"foo\")\n\n # invalid dimension\n with raises_regex(ValueError, \"invalid reindex dim\"):\n data.reindex(invalid=0)\n\n # out of order\n expected = data.sel(dim2=data[\"dim2\"][:5:-1])\n actual = data.reindex(dim2=data[\"dim2\"][:5:-1])\n assert_identical(actual, expected)\n\n # multiple fill values\n expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(\n var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),\n var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)),\n )\n actual = 
data.reindex(\n dim2=[0.1, 2.1, 3.1, 4.1], fill_value={\"var1\": -10, \"var2\": -20}\n )\n assert_identical(actual, expected)\n # use the default value\n expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(\n var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),\n var2=lambda ds: ds.var2.copy(\n data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1)\n ),\n )\n actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={\"var1\": -10})\n assert_identical(actual, expected)\n\n # regression test for #279\n expected = Dataset({\"x\": (\"time\", np.random.randn(5))}, {\"time\": range(5)})\n time2 = DataArray(np.arange(5), dims=\"time2\")\n with pytest.raises(ValueError):\n actual = expected.reindex(time=time2)\n\n # another regression test\n ds = Dataset(\n {\"foo\": ([\"x\", \"y\"], np.zeros((3, 4)))}, {\"x\": range(3), \"y\": range(4)}\n )\n expected = Dataset(\n {\"foo\": ([\"x\", \"y\"], np.zeros((3, 2)))}, {\"x\": [0, 1, 3], \"y\": [0, 1]}\n )\n expected[\"foo\"][-1] = np.nan\n actual = ds.reindex(x=[0, 1, 3], y=[0, 1])\n assert_identical(expected, actual)\n\n def test_reindex_warning(self):\n data = create_test_data()\n\n with pytest.raises(ValueError):\n # DataArray with different dimension raises Future warning\n ind = xr.DataArray([0.0, 1.0], dims=[\"new_dim\"], name=\"ind\")\n data.reindex(dim2=ind)\n\n # Should not warn\n ind = xr.DataArray([0.0, 1.0], dims=[\"dim2\"], name=\"ind\")\n with pytest.warns(None) as ws:\n data.reindex(dim2=ind)\n assert len(ws) == 0\n\n def test_reindex_variables_copied(self):\n data = create_test_data()\n reindexed_data = data.reindex(copy=False)\n for k in data.variables:\n assert reindexed_data.variables[k] is not data.variables[k]\n\n def test_reindex_method(self):\n ds = Dataset({\"x\": (\"y\", [10, 20]), \"y\": [0, 1]})\n y = [-0.5, 0.5, 1.5]\n actual = ds.reindex(y=y, method=\"backfill\")\n expected = Dataset({\"x\": (\"y\", [10, 20, np.nan]), \"y\": y})\n assert_identical(expected, actual)\n\n actual = ds.reindex(y=y, method=\"backfill\", tolerance=0.1)\n expected = Dataset({\"x\": (\"y\", 3 * [np.nan]), \"y\": y})\n assert_identical(expected, actual)\n\n actual = ds.reindex(y=y, method=\"pad\")\n expected = Dataset({\"x\": (\"y\", [np.nan, 10, 20]), \"y\": y})\n assert_identical(expected, actual)\n\n alt = Dataset({\"y\": y})\n actual = ds.reindex_like(alt, method=\"pad\")\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"x\": 2, \"z\": 1}])\n def test_reindex_fill_value(self, fill_value):\n ds = Dataset({\"x\": (\"y\", [10, 20]), \"z\": (\"y\", [-20, -10]), \"y\": [0, 1]})\n y = [0, 1, 2]\n actual = ds.reindex(y=y, fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value_x = fill_value_z = np.nan\n elif isinstance(fill_value, dict):\n fill_value_x = fill_value[\"x\"]\n fill_value_z = fill_value[\"z\"]\n else:\n fill_value_x = fill_value_z = fill_value\n expected = Dataset(\n {\n \"x\": (\"y\", [10, 20, fill_value_x]),\n \"z\": (\"y\", [-20, -10, fill_value_z]),\n \"y\": y,\n }\n )\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"x\": 2, \"z\": 1}])\n def test_reindex_like_fill_value(self, fill_value):\n ds = Dataset({\"x\": (\"y\", [10, 20]), \"z\": (\"y\", [-20, -10]), \"y\": [0, 1]})\n y = [0, 1, 2]\n alt = Dataset({\"y\": y})\n actual = ds.reindex_like(alt, fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we 
supply the default, we expect the missing value for a\n # float array\n fill_value_x = fill_value_z = np.nan\n elif isinstance(fill_value, dict):\n fill_value_x = fill_value[\"x\"]\n fill_value_z = fill_value[\"z\"]\n else:\n fill_value_x = fill_value_z = fill_value\n expected = Dataset(\n {\n \"x\": (\"y\", [10, 20, fill_value_x]),\n \"z\": (\"y\", [-20, -10, fill_value_z]),\n \"y\": y,\n }\n )\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"foo\": 2, \"bar\": 1}])\n def test_align_fill_value(self, fill_value):\n x = Dataset({\"foo\": DataArray([1, 2], dims=[\"x\"], coords={\"x\": [1, 2]})})\n y = Dataset({\"bar\": DataArray([1, 2], dims=[\"x\"], coords={\"x\": [1, 3]})})\n x2, y2 = align(x, y, join=\"outer\", fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value_foo = fill_value_bar = np.nan\n elif isinstance(fill_value, dict):\n fill_value_foo = fill_value[\"foo\"]\n fill_value_bar = fill_value[\"bar\"]\n else:\n fill_value_foo = fill_value_bar = fill_value\n\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [1, 2, fill_value_foo], dims=[\"x\"], coords={\"x\": [1, 2, 3]}\n )\n }\n )\n expected_y2 = Dataset(\n {\n \"bar\": DataArray(\n [1, fill_value_bar, 2], dims=[\"x\"], coords={\"x\": [1, 2, 3]}\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_align(self):\n left = create_test_data()\n right = left.copy(deep=True)\n right[\"dim3\"] = (\"dim3\", list(\"cdefghijkl\"))\n right[\"var3\"][:-2] = right[\"var3\"][2:].values\n right[\"var3\"][-2:] = np.random.randn(*right[\"var3\"][-2:].shape)\n right[\"numbers\"][:-2] = right[\"numbers\"][2:].values\n right[\"numbers\"][-2:] = -10\n\n intersection = list(\"cdefghij\")\n union = list(\"abcdefghijkl\")\n\n left2, right2 = align(left, right, join=\"inner\")\n assert_array_equal(left2[\"dim3\"], intersection)\n assert_identical(left2, right2)\n\n left2, right2 = align(left, right, join=\"outer\")\n\n assert_array_equal(left2[\"dim3\"], union)\n assert_equal(left2[\"dim3\"].variable, right2[\"dim3\"].variable)\n\n assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))\n assert np.isnan(left2[\"var3\"][-2:]).all()\n assert np.isnan(right2[\"var3\"][:2]).all()\n\n left2, right2 = align(left, right, join=\"left\")\n assert_equal(left2[\"dim3\"].variable, right2[\"dim3\"].variable)\n assert_equal(left2[\"dim3\"].variable, left[\"dim3\"].variable)\n\n assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))\n assert np.isnan(right2[\"var3\"][:2]).all()\n\n left2, right2 = align(left, right, join=\"right\")\n assert_equal(left2[\"dim3\"].variable, right2[\"dim3\"].variable)\n assert_equal(left2[\"dim3\"].variable, right[\"dim3\"].variable)\n\n assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))\n\n assert np.isnan(left2[\"var3\"][-2:]).all()\n\n with raises_regex(ValueError, \"invalid value for join\"):\n align(left, right, join=\"foobar\")\n with pytest.raises(TypeError):\n align(left, right, foo=\"bar\")\n\n def test_align_exact(self):\n left = xr.Dataset(coords={\"x\": [0, 1]})\n right = xr.Dataset(coords={\"x\": [1, 2]})\n\n left1, left2 = xr.align(left, left, join=\"exact\")\n assert_identical(left1, left)\n assert_identical(left2, left)\n\n with raises_regex(ValueError, \"indexes .* not equal\"):\n xr.align(left, right, join=\"exact\")\n\n def test_align_override(self):\n left = 
xr.Dataset(coords={\"x\": [0, 1, 2]})\n right = xr.Dataset(coords={\"x\": [0.1, 1.1, 2.1], \"y\": [1, 2, 3]})\n expected_right = xr.Dataset(coords={\"x\": [0, 1, 2], \"y\": [1, 2, 3]})\n\n new_left, new_right = xr.align(left, right, join=\"override\")\n assert_identical(left, new_left)\n assert_identical(new_right, expected_right)\n\n new_left, new_right = xr.align(left, right, exclude=\"x\", join=\"override\")\n assert_identical(left, new_left)\n assert_identical(right, new_right)\n\n new_left, new_right = xr.align(\n left.isel(x=0, drop=True), right, exclude=\"x\", join=\"override\"\n )\n assert_identical(left.isel(x=0, drop=True), new_left)\n assert_identical(right, new_right)\n\n with raises_regex(ValueError, \"Indexes along dimension 'x' don't have\"):\n xr.align(left.isel(x=0).expand_dims(\"x\"), right, join=\"override\")\n\n def test_align_exclude(self):\n x = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2], [3, 4]], dims=[\"x\", \"y\"], coords={\"x\": [1, 2], \"y\": [3, 4]}\n )\n }\n )\n y = Dataset(\n {\n \"bar\": DataArray(\n [[1, 2], [3, 4]], dims=[\"x\", \"y\"], coords={\"x\": [1, 3], \"y\": [5, 6]}\n )\n }\n )\n x2, y2 = align(x, y, exclude=[\"y\"], join=\"outer\")\n\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2], [3, 4], [np.nan, np.nan]],\n dims=[\"x\", \"y\"],\n coords={\"x\": [1, 2, 3], \"y\": [3, 4]},\n )\n }\n )\n expected_y2 = Dataset(\n {\n \"bar\": DataArray(\n [[1, 2], [np.nan, np.nan], [3, 4]],\n dims=[\"x\", \"y\"],\n coords={\"x\": [1, 2, 3], \"y\": [5, 6]},\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_align_nocopy(self):\n x = Dataset({\"foo\": DataArray([1, 2, 3], coords=[(\"x\", [1, 2, 3])])})\n y = Dataset({\"foo\": DataArray([1, 2], coords=[(\"x\", [1, 2])])})\n expected_x2 = x\n expected_y2 = Dataset(\n {\"foo\": DataArray([1, 2, np.nan], coords=[(\"x\", [1, 2, 3])])}\n )\n\n x2, y2 = align(x, y, copy=False, join=\"outer\")\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n assert source_ndarray(x[\"foo\"].data) is source_ndarray(x2[\"foo\"].data)\n\n x2, y2 = align(x, y, copy=True, join=\"outer\")\n assert source_ndarray(x[\"foo\"].data) is not source_ndarray(x2[\"foo\"].data)\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_align_indexes(self):\n x = Dataset({\"foo\": DataArray([1, 2, 3], dims=\"x\", coords=[(\"x\", [1, 2, 3])])})\n (x2,) = align(x, indexes={\"x\": [2, 3, 1]})\n expected_x2 = Dataset(\n {\"foo\": DataArray([2, 3, 1], dims=\"x\", coords={\"x\": [2, 3, 1]})}\n )\n\n assert_identical(expected_x2, x2)\n\n def test_align_non_unique(self):\n x = Dataset({\"foo\": (\"x\", [3, 4, 5]), \"x\": [0, 0, 1]})\n x1, x2 = align(x, x)\n assert x1.identical(x) and x2.identical(x)\n\n y = Dataset({\"bar\": (\"x\", [6, 7]), \"x\": [0, 1]})\n with raises_regex(ValueError, \"cannot reindex or align\"):\n align(x, y)\n\n def test_broadcast(self):\n ds = Dataset(\n {\"foo\": 0, \"bar\": (\"x\", [1]), \"baz\": (\"y\", [2, 3])}, {\"c\": (\"x\", [4])}\n )\n expected = Dataset(\n {\n \"foo\": ((\"x\", \"y\"), [[0, 0]]),\n \"bar\": ((\"x\", \"y\"), [[1, 1]]),\n \"baz\": ((\"x\", \"y\"), [[2, 3]]),\n },\n {\"c\": (\"x\", [4])},\n )\n (actual,) = broadcast(ds)\n assert_identical(expected, actual)\n\n ds_x = Dataset({\"foo\": (\"x\", [1])})\n ds_y = Dataset({\"bar\": (\"y\", [2, 3])})\n expected_x = Dataset({\"foo\": ((\"x\", \"y\"), [[1, 1]])})\n expected_y = Dataset({\"bar\": ((\"x\", \"y\"), [[2, 3]])})\n actual_x, actual_y = broadcast(ds_x, 
ds_y)\n assert_identical(expected_x, actual_x)\n assert_identical(expected_y, actual_y)\n\n array_y = ds_y[\"bar\"]\n expected_y = expected_y[\"bar\"]\n actual_x, actual_y = broadcast(ds_x, array_y)\n assert_identical(expected_x, actual_x)\n assert_identical(expected_y, actual_y)\n\n def test_broadcast_nocopy(self):\n # Test that data is not copied if not needed\n x = Dataset({\"foo\": ((\"x\", \"y\"), [[1, 1]])})\n y = Dataset({\"bar\": (\"y\", [2, 3])})\n\n (actual_x,) = broadcast(x)\n assert_identical(x, actual_x)\n assert source_ndarray(actual_x[\"foo\"].data) is source_ndarray(x[\"foo\"].data)\n\n actual_x, actual_y = broadcast(x, y)\n assert_identical(x, actual_x)\n assert source_ndarray(actual_x[\"foo\"].data) is source_ndarray(x[\"foo\"].data)\n\n def test_broadcast_exclude(self):\n x = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2], [3, 4]], dims=[\"x\", \"y\"], coords={\"x\": [1, 2], \"y\": [3, 4]}\n ),\n \"bar\": DataArray(5),\n }\n )\n y = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2]], dims=[\"z\", \"y\"], coords={\"z\": [1], \"y\": [5, 6]}\n )\n }\n )\n x2, y2 = broadcast(x, y, exclude=[\"y\"])\n\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [[[1, 2]], [[3, 4]]],\n dims=[\"x\", \"z\", \"y\"],\n coords={\"z\": [1], \"x\": [1, 2], \"y\": [3, 4]},\n ),\n \"bar\": DataArray(\n [[5], [5]], dims=[\"x\", \"z\"], coords={\"x\": [1, 2], \"z\": [1]}\n ),\n }\n )\n expected_y2 = Dataset(\n {\n \"foo\": DataArray(\n [[[1, 2]], [[1, 2]]],\n dims=[\"x\", \"z\", \"y\"],\n coords={\"z\": [1], \"x\": [1, 2], \"y\": [5, 6]},\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_broadcast_misaligned(self):\n x = Dataset({\"foo\": DataArray([1, 2, 3], coords=[(\"x\", [-1, -2, -3])])})\n y = Dataset(\n {\n \"bar\": DataArray(\n [[1, 2], [3, 4]],\n dims=[\"y\", \"x\"],\n coords={\"y\": [1, 2], \"x\": [10, -3]},\n )\n }\n )\n x2, y2 = broadcast(x, y)\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]],\n dims=[\"x\", \"y\"],\n coords={\"y\": [1, 2], \"x\": [-3, -2, -1, 10]},\n )\n }\n )\n expected_y2 = Dataset(\n {\n \"bar\": DataArray(\n [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]],\n dims=[\"x\", \"y\"],\n coords={\"y\": [1, 2], \"x\": [-3, -2, -1, 10]},\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_variable_indexing(self):\n data = create_test_data()\n v = data[\"var1\"]\n d1 = data[\"dim1\"]\n d2 = data[\"dim2\"]\n assert_equal(v, v[d1.values])\n assert_equal(v, v[d1])\n assert_equal(v[:3], v[d1 < 3])\n assert_equal(v[:, 3:], v[:, d2 >= 1.5])\n assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5])\n assert_equal(v[:3, :2], v[range(3), range(2)])\n assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]])\n\n def test_drop_variables(self):\n data = create_test_data()\n\n assert_identical(data, data.drop_vars([]))\n\n expected = Dataset({k: data[k] for k in data.variables if k != \"time\"})\n actual = data.drop_vars(\"time\")\n assert_identical(expected, actual)\n actual = data.drop_vars([\"time\"])\n assert_identical(expected, actual)\n\n with raises_regex(ValueError, \"cannot be found\"):\n data.drop_vars(\"not_found_here\")\n\n actual = data.drop_vars(\"not_found_here\", errors=\"ignore\")\n assert_identical(data, actual)\n\n actual = data.drop_vars([\"not_found_here\"], errors=\"ignore\")\n assert_identical(data, actual)\n\n actual = data.drop_vars([\"time\", \"not_found_here\"], errors=\"ignore\")\n assert_identical(expected, actual)\n\n # deprecated approach 
with `drop` works (straight copy paste from above)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop(\"not_found_here\", errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop([\"not_found_here\"], errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop([\"time\", \"not_found_here\"], errors=\"ignore\")\n assert_identical(expected, actual)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop({\"time\", \"not_found_here\"}, errors=\"ignore\")\n assert_identical(expected, actual)\n\n def test_drop_index_labels(self):\n data = Dataset({\"A\": ([\"x\", \"y\"], np.random.randn(2, 3)), \"x\": [\"a\", \"b\"]})\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"a\"], dim=\"x\")\n expected = data.isel(x=[1])\n assert_identical(expected, actual)\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"a\", \"b\"], dim=\"x\")\n expected = data.isel(x=slice(0, 0))\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError):\n # not contained in axis\n with pytest.warns(DeprecationWarning):\n data.drop([\"c\"], dim=\"x\")\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"c\"], dim=\"x\", errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.raises(ValueError):\n with pytest.warns(DeprecationWarning):\n data.drop([\"c\"], dim=\"x\", errors=\"wrong_value\")\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"a\", \"b\", \"c\"], \"x\", errors=\"ignore\")\n expected = data.isel(x=slice(0, 0))\n assert_identical(expected, actual)\n\n # DataArrays as labels are a nasty corner case as they are not\n # Iterable[Hashable] - DataArray.__iter__ yields scalar DataArrays.\n actual = data.drop_sel(x=DataArray([\"a\", \"b\", \"c\"]), errors=\"ignore\")\n expected = data.isel(x=slice(0, 0))\n assert_identical(expected, actual)\n with pytest.warns(DeprecationWarning):\n data.drop(DataArray([\"a\", \"b\", \"c\"]), dim=\"x\", errors=\"ignore\")\n assert_identical(expected, actual)\n\n with raises_regex(ValueError, \"does not have coordinate labels\"):\n data.drop_sel(y=1)\n\n def test_drop_labels_by_keyword(self):\n data = Dataset(\n {\"A\": ([\"x\", \"y\"], np.random.randn(2, 6)), \"x\": [\"a\", \"b\"], \"y\": range(6)}\n )\n # Basic functionality.\n assert len(data.coords[\"x\"]) == 2\n\n with pytest.warns(DeprecationWarning):\n ds1 = data.drop([\"a\"], dim=\"x\")\n ds2 = data.drop_sel(x=\"a\")\n ds3 = data.drop_sel(x=[\"a\"])\n ds4 = data.drop_sel(x=[\"a\", \"b\"])\n ds5 = data.drop_sel(x=[\"a\", \"b\"], y=range(0, 6, 2))\n\n arr = DataArray(range(3), dims=[\"c\"])\n with pytest.warns(FutureWarning):\n data.drop(arr.coords)\n with pytest.warns(FutureWarning):\n data.drop(arr.indexes)\n\n assert_array_equal(ds1.coords[\"x\"], [\"b\"])\n assert_array_equal(ds2.coords[\"x\"], [\"b\"])\n assert_array_equal(ds3.coords[\"x\"], [\"b\"])\n assert ds4.coords[\"x\"].size == 0\n assert ds5.coords[\"x\"].size == 0\n assert_array_equal(ds5.coords[\"y\"], [1, 3, 5])\n\n # Error handling if user tries both approaches.\n with pytest.raises(ValueError):\n data.drop(labels=[\"a\"], x=\"a\")\n with pytest.raises(ValueError):\n data.drop(labels=[\"a\"], dim=\"x\", x=\"a\")\n warnings.filterwarnings(\"ignore\", r\"\\W*drop\")\n with pytest.raises(ValueError):\n data.drop(dim=\"x\", x=\"a\")\n\n def test_drop_dims(self):\n data = xr.Dataset(\n {\n \"A\": ([\"x\", \"y\"], 
np.random.randn(2, 3)),\n \"B\": (\"x\", np.random.randn(2)),\n \"x\": [\"a\", \"b\"],\n \"z\": np.pi,\n }\n )\n\n actual = data.drop_dims(\"x\")\n expected = data.drop_vars([\"A\", \"B\", \"x\"])\n assert_identical(expected, actual)\n\n actual = data.drop_dims(\"y\")\n expected = data.drop_vars(\"A\")\n assert_identical(expected, actual)\n\n actual = data.drop_dims([\"x\", \"y\"])\n expected = data.drop_vars([\"A\", \"B\", \"x\"])\n assert_identical(expected, actual)\n\n with pytest.raises((ValueError, KeyError)):\n data.drop_dims(\"z\") # not a dimension\n\n with pytest.raises((ValueError, KeyError)):\n data.drop_dims(None)\n\n actual = data.drop_dims(\"z\", errors=\"ignore\")\n assert_identical(data, actual)\n\n actual = data.drop_dims(None, errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.raises(ValueError):\n actual = data.drop_dims(\"z\", errors=\"wrong_value\")\n\n actual = data.drop_dims([\"x\", \"y\", \"z\"], errors=\"ignore\")\n expected = data.drop_vars([\"A\", \"B\", \"x\"])\n assert_identical(expected, actual)\n\n def test_copy(self):\n data = create_test_data()\n data.attrs[\"Test\"] = [1, 2, 3]\n\n for copied in [data.copy(deep=False), copy(data)]:\n assert_identical(data, copied)\n assert data.encoding == copied.encoding\n # Note: IndexVariable objects with string dtype are always\n # copied because of xarray.core.util.safe_cast_to_index.\n # Limiting the test to data variables.\n for k in data.data_vars:\n v0 = data.variables[k]\n v1 = copied.variables[k]\n assert source_ndarray(v0.data) is source_ndarray(v1.data)\n copied[\"foo\"] = (\"z\", np.arange(5))\n assert \"foo\" not in data\n\n copied.attrs[\"foo\"] = \"bar\"\n assert \"foo\" not in data.attrs\n assert data.attrs[\"Test\"] is copied.attrs[\"Test\"]\n\n for copied in [data.copy(deep=True), deepcopy(data)]:\n assert_identical(data, copied)\n for k, v0 in data.variables.items():\n v1 = copied.variables[k]\n assert v0 is not v1\n\n assert data.attrs[\"Test\"] is not copied.attrs[\"Test\"]\n\n def test_copy_with_data(self):\n orig = create_test_data()\n new_data = {k: np.random.randn(*v.shape) for k, v in orig.data_vars.items()}\n actual = orig.copy(data=new_data)\n\n expected = orig.copy()\n for k, v in new_data.items():\n expected[k].data = v\n assert_identical(expected, actual)\n\n @pytest.mark.xfail(raises=AssertionError)\n @pytest.mark.parametrize(\n \"deep, expected_orig\",\n [\n [\n True,\n xr.DataArray(\n xr.IndexVariable(\"a\", np.array([1, 2])),\n coords={\"a\": [1, 2]},\n dims=[\"a\"],\n ),\n ],\n [\n False,\n xr.DataArray(\n xr.IndexVariable(\"a\", np.array([999, 2])),\n coords={\"a\": [999, 2]},\n dims=[\"a\"],\n ),\n ],\n ],\n )\n def test_copy_coords(self, deep, expected_orig):\n \"\"\"The test fails for the shallow copy, and apparently only on Windows\n for some reason. 
In windows coords seem to be immutable unless it's one\n dataset deep copied from another.\"\"\"\n ds = xr.DataArray(\n np.ones([2, 2, 2]),\n coords={\"a\": [1, 2], \"b\": [\"x\", \"y\"], \"c\": [0, 1]},\n dims=[\"a\", \"b\", \"c\"],\n name=\"value\",\n ).to_dataset()\n ds_cp = ds.copy(deep=deep)\n ds_cp.coords[\"a\"].data[0] = 999\n\n expected_cp = xr.DataArray(\n xr.IndexVariable(\"a\", np.array([999, 2])),\n coords={\"a\": [999, 2]},\n dims=[\"a\"],\n )\n assert_identical(ds_cp.coords[\"a\"], expected_cp)\n\n assert_identical(ds.coords[\"a\"], expected_orig)\n\n def test_copy_with_data_errors(self):\n orig = create_test_data()\n new_var1 = np.arange(orig[\"var1\"].size).reshape(orig[\"var1\"].shape)\n with raises_regex(ValueError, \"Data must be dict-like\"):\n orig.copy(data=new_var1)\n with raises_regex(ValueError, \"only contain variables in original\"):\n orig.copy(data={\"not_in_original\": new_var1})\n with raises_regex(ValueError, \"contain all variables in original\"):\n orig.copy(data={\"var1\": new_var1})\n\n def test_rename(self):\n data = create_test_data()\n newnames = {\"var1\": \"renamed_var1\", \"dim2\": \"renamed_dim2\"}\n renamed = data.rename(newnames)\n\n variables = dict(data.variables)\n for k, v in newnames.items():\n variables[v] = variables.pop(k)\n\n for k, v in variables.items():\n dims = list(v.dims)\n for name, newname in newnames.items():\n if name in dims:\n dims[dims.index(name)] = newname\n\n assert_equal(\n Variable(dims, v.values, v.attrs),\n renamed[k].variable.to_base_variable(),\n )\n assert v.encoding == renamed[k].encoding\n assert type(v) is type(renamed.variables[k]) # noqa: E721\n\n assert \"var1\" not in renamed\n assert \"dim2\" not in renamed\n\n with raises_regex(ValueError, \"cannot rename 'not_a_var'\"):\n data.rename({\"not_a_var\": \"nada\"})\n\n with raises_regex(ValueError, \"'var1' conflicts\"):\n data.rename({\"var2\": \"var1\"})\n\n # verify that we can rename a variable without accessing the data\n var1 = data[\"var1\"]\n data[\"var1\"] = (var1.dims, InaccessibleArray(var1.values))\n renamed = data.rename(newnames)\n with pytest.raises(UnexpectedDataAccess):\n renamed[\"renamed_var1\"].values\n\n renamed_kwargs = data.rename(**newnames)\n assert_identical(renamed, renamed_kwargs)\n\n def test_rename_old_name(self):\n # regtest for GH1477\n data = create_test_data()\n\n with raises_regex(ValueError, \"'samecol' conflicts\"):\n data.rename({\"var1\": \"samecol\", \"var2\": \"samecol\"})\n\n # This shouldn't cause any problems.\n data.rename({\"var1\": \"var2\", \"var2\": \"var1\"})\n\n def test_rename_same_name(self):\n data = create_test_data()\n newnames = {\"var1\": \"var1\", \"dim2\": \"dim2\"}\n renamed = data.rename(newnames)\n assert_identical(renamed, data)\n\n def test_rename_inplace(self):\n times = pd.date_range(\"2000-01-01\", periods=3)\n data = Dataset({\"z\": (\"x\", [2, 3, 4]), \"t\": (\"t\", times)})\n with pytest.raises(TypeError):\n data.rename({\"x\": \"y\"}, inplace=True)\n\n def test_rename_dims(self):\n original = Dataset({\"x\": (\"x\", [0, 1, 2]), \"y\": (\"x\", [10, 11, 12]), \"z\": 42})\n expected = Dataset(\n {\"x\": (\"x_new\", [0, 1, 2]), \"y\": (\"x_new\", [10, 11, 12]), \"z\": 42}\n )\n expected = expected.set_coords(\"x\")\n dims_dict = {\"x\": \"x_new\"}\n actual = original.rename_dims(dims_dict)\n assert_identical(expected, actual)\n actual_2 = original.rename_dims(**dims_dict)\n assert_identical(expected, actual_2)\n\n # Test to raise ValueError\n dims_dict_bad = {\"x_bad\": \"x_new\"}\n with 
pytest.raises(ValueError):\n original.rename_dims(dims_dict_bad)\n\n with pytest.raises(ValueError):\n original.rename_dims({\"x\": \"z\"})\n\n def test_rename_vars(self):\n original = Dataset({\"x\": (\"x\", [0, 1, 2]), \"y\": (\"x\", [10, 11, 12]), \"z\": 42})\n expected = Dataset(\n {\"x_new\": (\"x\", [0, 1, 2]), \"y\": (\"x\", [10, 11, 12]), \"z\": 42}\n )\n expected = expected.set_coords(\"x_new\")\n name_dict = {\"x\": \"x_new\"}\n actual = original.rename_vars(name_dict)\n assert_identical(expected, actual)\n actual_2 = original.rename_vars(**name_dict)\n assert_identical(expected, actual_2)\n\n # Test to raise ValueError\n names_dict_bad = {\"x_bad\": \"x_new\"}\n with pytest.raises(ValueError):\n original.rename_vars(names_dict_bad)\n\n def test_rename_multiindex(self):\n mindex = pd.MultiIndex.from_tuples(\n [([1, 2]), ([3, 4])], names=[\"level0\", \"level1\"]\n )\n data = Dataset({}, {\"x\": mindex})\n with raises_regex(ValueError, \"conflicting MultiIndex\"):\n data.rename({\"x\": \"level0\"})\n\n @requires_cftime\n def test_rename_does_not_change_CFTimeIndex_type(self):\n # make sure CFTimeIndex is not converted to DatetimeIndex #3522\n\n time = xr.cftime_range(start=\"2000\", periods=6, freq=\"2MS\", calendar=\"noleap\")\n orig = Dataset(coords={\"time\": time})\n\n renamed = orig.rename(time=\"time_new\")\n assert \"time_new\" in renamed.indexes\n assert isinstance(renamed.indexes[\"time_new\"], CFTimeIndex)\n assert renamed.indexes[\"time_new\"].name == \"time_new\"\n\n # check original has not changed\n assert \"time\" in orig.indexes\n assert isinstance(orig.indexes[\"time\"], CFTimeIndex)\n assert orig.indexes[\"time\"].name == \"time\"\n\n # note: rename_dims(time=\"time_new\") drops \"ds.indexes\"\n renamed = orig.rename_dims()\n assert isinstance(renamed.indexes[\"time\"], CFTimeIndex)\n\n renamed = orig.rename_vars()\n assert isinstance(renamed.indexes[\"time\"], CFTimeIndex)\n\n def test_rename_does_not_change_DatetimeIndex_type(self):\n # make sure DatetimeIndex is conderved on rename\n\n time = pd.date_range(start=\"2000\", periods=6, freq=\"2MS\")\n orig = Dataset(coords={\"time\": time})\n\n renamed = orig.rename(time=\"time_new\")\n assert \"time_new\" in renamed.indexes\n assert isinstance(renamed.indexes[\"time_new\"], DatetimeIndex)\n assert renamed.indexes[\"time_new\"].name == \"time_new\"\n\n # check original has not changed\n assert \"time\" in orig.indexes\n assert isinstance(orig.indexes[\"time\"], DatetimeIndex)\n assert orig.indexes[\"time\"].name == \"time\"\n\n # note: rename_dims(time=\"time_new\") drops \"ds.indexes\"\n renamed = orig.rename_dims()\n assert isinstance(renamed.indexes[\"time\"], DatetimeIndex)\n\n renamed = orig.rename_vars()\n assert isinstance(renamed.indexes[\"time\"], DatetimeIndex)\n\n def test_swap_dims(self):\n original = Dataset({\"x\": [1, 2, 3], \"y\": (\"x\", list(\"abc\")), \"z\": 42})\n expected = Dataset({\"z\": 42}, {\"x\": (\"y\", [1, 2, 3]), \"y\": list(\"abc\")})\n actual = original.swap_dims({\"x\": \"y\"})\n assert_identical(expected, actual)\n assert isinstance(actual.variables[\"y\"], IndexVariable)\n assert isinstance(actual.variables[\"x\"], Variable)\n pd.testing.assert_index_equal(actual.indexes[\"y\"], expected.indexes[\"y\"])\n\n roundtripped = actual.swap_dims({\"y\": \"x\"})\n assert_identical(original.set_coords(\"y\"), roundtripped)\n\n with raises_regex(ValueError, \"cannot swap\"):\n original.swap_dims({\"y\": \"x\"})\n with raises_regex(ValueError, \"replacement dimension\"):\n 
original.swap_dims({\"x\": \"z\"})\n\n expected = Dataset(\n {\"y\": (\"u\", list(\"abc\")), \"z\": 42}, coords={\"x\": (\"u\", [1, 2, 3])}\n )\n actual = original.swap_dims({\"x\": \"u\"})\n assert_identical(expected, actual)\n\n # handle multiindex case\n idx = pd.MultiIndex.from_arrays([list(\"aab\"), list(\"yzz\")], names=[\"y1\", \"y2\"])\n original = Dataset({\"x\": [1, 2, 3], \"y\": (\"x\", idx), \"z\": 42})\n expected = Dataset({\"z\": 42}, {\"x\": (\"y\", [1, 2, 3]), \"y\": idx})\n actual = original.swap_dims({\"x\": \"y\"})\n assert_identical(expected, actual)\n assert isinstance(actual.variables[\"y\"], IndexVariable)\n assert isinstance(actual.variables[\"x\"], Variable)\n pd.testing.assert_index_equal(actual.indexes[\"y\"], expected.indexes[\"y\"])\n\n def test_expand_dims_error(self):\n original = Dataset(\n {\n \"x\": (\"a\", np.random.randn(3)),\n \"y\": ([\"b\", \"a\"], np.random.randn(4, 3)),\n \"z\": (\"a\", np.random.randn(3)),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n\n with raises_regex(ValueError, \"already exists\"):\n original.expand_dims(dim=[\"x\"])\n\n # Make sure it raises true error also for non-dimensional coordinates\n # which has dimension.\n original = original.set_coords(\"z\")\n with raises_regex(ValueError, \"already exists\"):\n original.expand_dims(dim=[\"z\"])\n\n original = Dataset(\n {\n \"x\": (\"a\", np.random.randn(3)),\n \"y\": ([\"b\", \"a\"], np.random.randn(4, 3)),\n \"z\": (\"a\", np.random.randn(3)),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n with raises_regex(TypeError, \"value of new dimension\"):\n original.expand_dims({\"d\": 3.2})\n with raises_regex(ValueError, \"both keyword and positional\"):\n original.expand_dims({\"d\": 4}, e=4)\n\n def test_expand_dims_int(self):\n original = Dataset(\n {\"x\": (\"a\", np.random.randn(3)), \"y\": ([\"b\", \"a\"], np.random.randn(4, 3))},\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n\n actual = original.expand_dims([\"z\"], [1])\n expected = Dataset(\n {\n \"x\": original[\"x\"].expand_dims(\"z\", 1),\n \"y\": original[\"y\"].expand_dims(\"z\", 1),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n assert_identical(expected, actual)\n # make sure squeeze restores the original data set.\n roundtripped = actual.squeeze(\"z\")\n assert_identical(original, roundtripped)\n\n # another test with a negative axis\n actual = original.expand_dims([\"z\"], [-1])\n expected = Dataset(\n {\n \"x\": original[\"x\"].expand_dims(\"z\", -1),\n \"y\": original[\"y\"].expand_dims(\"z\", -1),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n assert_identical(expected, actual)\n # make sure squeeze restores the original data set.\n roundtripped = actual.squeeze(\"z\")\n assert_identical(original, roundtripped)\n\n def test_expand_dims_coords(self):\n original = Dataset({\"x\": (\"a\", np.array([1, 2, 3]))})\n expected = Dataset(\n {\"x\": ((\"b\", \"a\"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={\"b\": [1, 2]}\n )\n actual = original.expand_dims(dict(b=[1, 2]))\n assert_identical(expected, actual)\n assert 
\"b\" not in original._coord_names\n\n def test_expand_dims_existing_scalar_coord(self):\n original = Dataset({\"x\": 1}, {\"a\": 2})\n expected = Dataset({\"x\": ((\"a\",), [1])}, {\"a\": [2]})\n actual = original.expand_dims(\"a\")\n assert_identical(expected, actual)\n\n def test_isel_expand_dims_roundtrip(self):\n original = Dataset({\"x\": ((\"a\",), [1])}, {\"a\": [2]})\n actual = original.isel(a=0).expand_dims(\"a\")\n assert_identical(actual, original)\n\n def test_expand_dims_mixed_int_and_coords(self):\n # Test expanding one dimension to have size > 1 that doesn't have\n # coordinates, and also expanding another dimension to have size > 1\n # that DOES have coordinates.\n original = Dataset(\n {\"x\": (\"a\", np.random.randn(3)), \"y\": ([\"b\", \"a\"], np.random.randn(4, 3))},\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n )\n\n actual = original.expand_dims({\"d\": 4, \"e\": [\"l\", \"m\", \"n\"]})\n\n expected = Dataset(\n {\n \"x\": xr.DataArray(\n original[\"x\"].values * np.ones([4, 3, 3]),\n coords=dict(d=range(4), e=[\"l\", \"m\", \"n\"], a=np.linspace(0, 1, 3)),\n dims=[\"d\", \"e\", \"a\"],\n ).drop_vars(\"d\"),\n \"y\": xr.DataArray(\n original[\"y\"].values * np.ones([4, 3, 4, 3]),\n coords=dict(\n d=range(4),\n e=[\"l\", \"m\", \"n\"],\n b=np.linspace(0, 1, 4),\n a=np.linspace(0, 1, 3),\n ),\n dims=[\"d\", \"e\", \"b\", \"a\"],\n ).drop_vars(\"d\"),\n },\n coords={\"c\": np.linspace(0, 1, 5)},\n )\n assert_identical(actual, expected)\n\n def test_expand_dims_kwargs_python36plus(self):\n original = Dataset(\n {\"x\": (\"a\", np.random.randn(3)), \"y\": ([\"b\", \"a\"], np.random.randn(4, 3))},\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n other_way = original.expand_dims(e=[\"l\", \"m\", \"n\"])\n other_way_expected = Dataset(\n {\n \"x\": xr.DataArray(\n original[\"x\"].values * np.ones([3, 3]),\n coords=dict(e=[\"l\", \"m\", \"n\"], a=np.linspace(0, 1, 3)),\n dims=[\"e\", \"a\"],\n ),\n \"y\": xr.DataArray(\n original[\"y\"].values * np.ones([3, 4, 3]),\n coords=dict(\n e=[\"l\", \"m\", \"n\"],\n b=np.linspace(0, 1, 4),\n a=np.linspace(0, 1, 3),\n ),\n dims=[\"e\", \"b\", \"a\"],\n ),\n },\n coords={\"c\": np.linspace(0, 1, 5)},\n attrs={\"key\": \"entry\"},\n )\n assert_identical(other_way_expected, other_way)\n\n def test_set_index(self):\n expected = create_test_multiindex()\n mindex = expected[\"x\"].to_index()\n indexes = [mindex.get_level_values(n) for n in mindex.names]\n coords = {idx.name: (\"x\", idx) for idx in indexes}\n ds = Dataset({}, coords=coords)\n\n obj = ds.set_index(x=mindex.names)\n assert_identical(obj, expected)\n\n with pytest.raises(TypeError):\n ds.set_index(x=mindex.names, inplace=True)\n assert_identical(ds, expected)\n\n # ensure set_index with no existing index and a single data var given\n # doesn't return multi-index\n ds = Dataset(data_vars={\"x_var\": (\"x\", [0, 1, 2])})\n expected = Dataset(coords={\"x\": [0, 1, 2]})\n assert_identical(ds.set_index(x=\"x_var\"), expected)\n\n # Issue 3176: Ensure clear error message on key error.\n with pytest.raises(ValueError) as excinfo:\n ds.set_index(foo=\"bar\")\n assert str(excinfo.value) == \"bar is not the name of an existing variable.\"\n\n def test_reset_index(self):\n ds = create_test_multiindex()\n mindex = ds[\"x\"].to_index()\n indexes = [mindex.get_level_values(n) for n in mindex.names]\n coords = {idx.name: (\"x\", idx) 
for idx in indexes}\n expected = Dataset({}, coords=coords)\n\n obj = ds.reset_index(\"x\")\n assert_identical(obj, expected)\n\n with pytest.raises(TypeError):\n ds.reset_index(\"x\", inplace=True)\n\n def test_reset_index_keep_attrs(self):\n coord_1 = DataArray([1, 2], dims=[\"coord_1\"], attrs={\"attrs\": True})\n ds = Dataset({}, {\"coord_1\": coord_1})\n expected = Dataset({}, {\"coord_1_\": coord_1})\n obj = ds.reset_index(\"coord_1\")\n assert_identical(expected, obj)\n\n def test_reorder_levels(self):\n ds = create_test_multiindex()\n mindex = ds[\"x\"].to_index()\n midx = mindex.reorder_levels([\"level_2\", \"level_1\"])\n expected = Dataset({}, coords={\"x\": midx})\n\n reindexed = ds.reorder_levels(x=[\"level_2\", \"level_1\"])\n assert_identical(reindexed, expected)\n\n with pytest.raises(TypeError):\n ds.reorder_levels(x=[\"level_2\", \"level_1\"], inplace=True)\n\n ds = Dataset({}, coords={\"x\": [1, 2]})\n with raises_regex(ValueError, \"has no MultiIndex\"):\n ds.reorder_levels(x=[\"level_1\", \"level_2\"])\n\n def test_stack(self):\n ds = Dataset(\n {\"a\": (\"x\", [0, 1]), \"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]), \"y\": [\"a\", \"b\"]}\n )\n\n exp_index = pd.MultiIndex.from_product([[0, 1], [\"a\", \"b\"]], names=[\"x\", \"y\"])\n expected = Dataset(\n {\"a\": (\"z\", [0, 0, 1, 1]), \"b\": (\"z\", [0, 1, 2, 3]), \"z\": exp_index}\n )\n actual = ds.stack(z=[\"x\", \"y\"])\n assert_identical(expected, actual)\n\n actual = ds.stack(z=[...])\n assert_identical(expected, actual)\n\n # non list dims with ellipsis\n actual = ds.stack(z=(...,))\n assert_identical(expected, actual)\n\n # ellipsis with given dim\n actual = ds.stack(z=[..., \"y\"])\n assert_identical(expected, actual)\n\n exp_index = pd.MultiIndex.from_product([[\"a\", \"b\"], [0, 1]], names=[\"y\", \"x\"])\n expected = Dataset(\n {\"a\": (\"z\", [0, 1, 0, 1]), \"b\": (\"z\", [0, 2, 1, 3]), \"z\": exp_index}\n )\n actual = ds.stack(z=[\"y\", \"x\"])\n assert_identical(expected, actual)\n\n def test_unstack(self):\n index = pd.MultiIndex.from_product([[0, 1], [\"a\", \"b\"]], names=[\"x\", \"y\"])\n ds = Dataset({\"b\": (\"z\", [0, 1, 2, 3]), \"z\": index})\n expected = Dataset(\n {\"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]), \"x\": [0, 1], \"y\": [\"a\", \"b\"]}\n )\n for dim in [\"z\", [\"z\"], None]:\n actual = ds.unstack(dim)\n assert_identical(actual, expected)\n\n def test_unstack_errors(self):\n ds = Dataset({\"x\": [1, 2, 3]})\n with raises_regex(ValueError, \"does not contain the dimensions\"):\n ds.unstack(\"foo\")\n with raises_regex(ValueError, \"do not have a MultiIndex\"):\n ds.unstack(\"x\")\n\n def test_unstack_fill_value(self):\n ds = xr.Dataset(\n {\"var\": ((\"x\",), np.arange(6)), \"other_var\": ((\"x\",), np.arange(3, 9))},\n coords={\"x\": [0, 1, 2] * 2, \"y\": ((\"x\",), [\"a\"] * 3 + [\"b\"] * 3)},\n )\n # make ds incomplete\n ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=[\"x\", \"y\"])\n # test fill_value\n actual = ds.unstack(\"index\", fill_value=-1)\n expected = ds.unstack(\"index\").fillna(-1).astype(int)\n assert actual[\"var\"].dtype == int\n assert_equal(actual, expected)\n\n actual = ds[\"var\"].unstack(\"index\", fill_value=-1)\n expected = ds[\"var\"].unstack(\"index\").fillna(-1).astype(int)\n assert_equal(actual, expected)\n\n actual = ds.unstack(\"index\", fill_value={\"var\": -1, \"other_var\": 1})\n expected = ds.unstack(\"index\").fillna({\"var\": -1, \"other_var\": 1}).astype(int)\n assert_equal(actual, expected)\n\n @requires_sparse\n def test_unstack_sparse(self):\n ds = 
xr.Dataset(\n {\"var\": ((\"x\",), np.arange(6))},\n coords={\"x\": [0, 1, 2] * 2, \"y\": ((\"x\",), [\"a\"] * 3 + [\"b\"] * 3)},\n )\n # make ds incomplete\n ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=[\"x\", \"y\"])\n # test fill_value\n actual = ds.unstack(\"index\", sparse=True)\n expected = ds.unstack(\"index\")\n assert actual[\"var\"].variable._to_dense().equals(expected[\"var\"].variable)\n assert actual[\"var\"].data.density < 1.0\n\n actual = ds[\"var\"].unstack(\"index\", sparse=True)\n expected = ds[\"var\"].unstack(\"index\")\n assert actual.variable._to_dense().equals(expected.variable)\n assert actual.data.density < 1.0\n\n def test_stack_unstack_fast(self):\n ds = Dataset(\n {\n \"a\": (\"x\", [0, 1]),\n \"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]),\n \"x\": [0, 1],\n \"y\": [\"a\", \"b\"],\n }\n )\n actual = ds.stack(z=[\"x\", \"y\"]).unstack(\"z\")\n assert actual.broadcast_equals(ds)\n\n actual = ds[[\"b\"]].stack(z=[\"x\", \"y\"]).unstack(\"z\")\n assert actual.identical(ds[[\"b\"]])\n\n def test_stack_unstack_slow(self):\n ds = Dataset(\n {\n \"a\": (\"x\", [0, 1]),\n \"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]),\n \"x\": [0, 1],\n \"y\": [\"a\", \"b\"],\n }\n )\n stacked = ds.stack(z=[\"x\", \"y\"])\n actual = stacked.isel(z=slice(None, None, -1)).unstack(\"z\")\n assert actual.broadcast_equals(ds)\n\n stacked = ds[[\"b\"]].stack(z=[\"x\", \"y\"])\n actual = stacked.isel(z=slice(None, None, -1)).unstack(\"z\")\n assert actual.identical(ds[[\"b\"]])\n\n def test_to_stacked_array_invalid_sample_dims(self):\n data = xr.Dataset(\n data_vars={\"a\": ((\"x\", \"y\"), [[0, 1, 2], [3, 4, 5]]), \"b\": (\"x\", [6, 7])},\n coords={\"y\": [\"u\", \"v\", \"w\"]},\n )\n with pytest.raises(ValueError):\n data.to_stacked_array(\"features\", sample_dims=[\"y\"])\n\n def test_to_stacked_array_name(self):\n name = \"adf9d\"\n\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n sample_dims = [\"x\"]\n\n y = D.to_stacked_array(\"features\", sample_dims, name=name)\n assert y.name == name\n\n def test_to_stacked_array_dtype_dims(self):\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n sample_dims = [\"x\"]\n y = D.to_stacked_array(\"features\", sample_dims)\n assert y.indexes[\"features\"].levels[1].dtype == D.y.dtype\n assert y.dims == (\"x\", \"features\")\n\n def test_to_stacked_array_to_unstacked_dataset(self):\n\n # single dimension: regression test for GH4049\n arr = xr.DataArray(np.arange(3), coords=[(\"x\", [0, 1, 2])])\n data = xr.Dataset({\"a\": arr, \"b\": arr})\n stacked = data.to_stacked_array(\"y\", sample_dims=[\"x\"])\n unstacked = stacked.to_unstacked_dataset(\"y\")\n assert_identical(unstacked, data)\n\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n sample_dims = [\"x\"]\n y = D.to_stacked_array(\"features\", sample_dims).transpose(\"x\", \"features\")\n\n x = y.to_unstacked_dataset(\"features\")\n assert_identical(D, x)\n\n # test on just one sample\n x0 = y[0].to_unstacked_dataset(\"features\")\n d0 = D.isel(x=0)\n assert_identical(d0, x0)\n\n def test_to_stacked_array_to_unstacked_dataset_different_dimension(self):\n # test when variables have different dimensionality\n a, b = create_test_stacked_array()\n sample_dims = [\"x\"]\n D = xr.Dataset({\"a\": a, \"b\": b.isel(y=0)})\n\n y = D.to_stacked_array(\"features\", sample_dims)\n x = y.to_unstacked_dataset(\"features\")\n 
assert_identical(D, x)\n\n def test_update(self):\n data = create_test_data(seed=0)\n expected = data.copy()\n var2 = Variable(\"dim1\", np.arange(8))\n actual = data.update({\"var2\": var2})\n expected[\"var2\"] = var2\n assert_identical(expected, actual)\n\n actual = data.copy()\n actual_result = actual.update(data)\n assert actual_result is actual\n assert_identical(expected, actual)\n\n with pytest.raises(TypeError):\n actual = data.update(data, inplace=False)\n\n other = Dataset(attrs={\"new\": \"attr\"})\n actual = data.copy()\n actual.update(other)\n assert_identical(expected, actual)\n\n def test_update_overwrite_coords(self):\n data = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 3})\n data.update(Dataset(coords={\"b\": 4}))\n expected = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 4})\n assert_identical(data, expected)\n\n data = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 3})\n data.update(Dataset({\"c\": 5}, coords={\"b\": 4}))\n expected = Dataset({\"a\": (\"x\", [1, 2]), \"c\": 5}, {\"b\": 4})\n assert_identical(data, expected)\n\n data = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 3})\n data.update({\"c\": DataArray(5, coords={\"b\": 4})})\n expected = Dataset({\"a\": (\"x\", [1, 2]), \"c\": 5}, {\"b\": 3})\n assert_identical(data, expected)\n\n def test_update_auto_align(self):\n ds = Dataset({\"x\": (\"t\", [3, 4])}, {\"t\": [0, 1]})\n\n expected = Dataset({\"x\": (\"t\", [3, 4]), \"y\": (\"t\", [np.nan, 5])}, {\"t\": [0, 1]})\n actual = ds.copy()\n other = {\"y\": (\"t\", [5]), \"t\": [1]}\n with raises_regex(ValueError, \"conflicting sizes\"):\n actual.update(other)\n actual.update(Dataset(other))\n assert_identical(expected, actual)\n\n actual = ds.copy()\n other = Dataset({\"y\": (\"t\", [5]), \"t\": [100]})\n actual.update(other)\n expected = Dataset(\n {\"x\": (\"t\", [3, 4]), \"y\": (\"t\", [np.nan] * 2)}, {\"t\": [0, 1]}\n )\n assert_identical(expected, actual)\n\n def test_getitem(self):\n data = create_test_data()\n assert isinstance(data[\"var1\"], DataArray)\n assert_equal(data[\"var1\"].variable, data.variables[\"var1\"])\n with pytest.raises(KeyError):\n data[\"notfound\"]\n with pytest.raises(KeyError):\n data[[\"var1\", \"notfound\"]]\n\n actual = data[[\"var1\", \"var2\"]]\n expected = Dataset({\"var1\": data[\"var1\"], \"var2\": data[\"var2\"]})\n assert_equal(expected, actual)\n\n actual = data[\"numbers\"]\n expected = DataArray(\n data[\"numbers\"].variable,\n {\"dim3\": data[\"dim3\"], \"numbers\": data[\"numbers\"]},\n dims=\"dim3\",\n name=\"numbers\",\n )\n assert_identical(expected, actual)\n\n actual = data[dict(dim1=0)]\n expected = data.isel(dim1=0)\n assert_identical(expected, actual)\n\n def test_getitem_hashable(self):\n data = create_test_data()\n data[(3, 4)] = data[\"var1\"] + 1\n expected = data[\"var1\"] + 1\n expected.name = (3, 4)\n assert_identical(expected, data[(3, 4)])\n with raises_regex(KeyError, \"('var1', 'var2')\"):\n data[(\"var1\", \"var2\")]\n\n def test_virtual_variables_default_coords(self):\n dataset = Dataset({\"foo\": (\"x\", range(10))})\n expected = DataArray(range(10), dims=\"x\", name=\"x\")\n actual = dataset[\"x\"]\n assert_identical(expected, actual)\n assert isinstance(actual.variable, IndexVariable)\n\n actual = dataset[[\"x\", \"foo\"]]\n expected = dataset.assign_coords(x=range(10))\n assert_identical(expected, actual)\n\n def test_virtual_variables_time(self):\n # access virtual variables\n data = create_test_data()\n expected = DataArray(\n 1 + np.arange(20), coords=[data[\"time\"]], dims=\"time\", 
name=\"dayofyear\"\n )\n\n assert_array_equal(\n data[\"time.month\"].values, data.variables[\"time\"].to_index().month\n )\n assert_array_equal(data[\"time.season\"].values, \"DJF\")\n # test virtual variable math\n assert_array_equal(data[\"time.dayofyear\"] + 1, 2 + np.arange(20))\n assert_array_equal(np.sin(data[\"time.dayofyear\"]), np.sin(1 + np.arange(20)))\n # ensure they become coordinates\n expected = Dataset({}, {\"dayofyear\": data[\"time.dayofyear\"]})\n actual = data[[\"time.dayofyear\"]]\n assert_equal(expected, actual)\n # non-coordinate variables\n ds = Dataset({\"t\": (\"x\", pd.date_range(\"2000-01-01\", periods=3))})\n assert (ds[\"t.year\"] == 2000).all()\n\n def test_virtual_variable_same_name(self):\n # regression test for GH367\n times = pd.date_range(\"2000-01-01\", freq=\"H\", periods=5)\n data = Dataset({\"time\": times})\n actual = data[\"time.time\"]\n expected = DataArray(times.time, [(\"time\", times)], name=\"time\")\n assert_identical(actual, expected)\n\n def test_virtual_variable_multiindex(self):\n # access multi-index levels as virtual variables\n data = create_test_multiindex()\n expected = DataArray(\n [\"a\", \"a\", \"b\", \"b\"],\n name=\"level_1\",\n coords=[data[\"x\"].to_index()],\n dims=\"x\",\n )\n assert_identical(expected, data[\"level_1\"])\n\n # combine multi-index level and datetime\n dr_index = pd.date_range(\"1/1/2011\", periods=4, freq=\"H\")\n mindex = pd.MultiIndex.from_arrays(\n [[\"a\", \"a\", \"b\", \"b\"], dr_index], names=(\"level_str\", \"level_date\")\n )\n data = Dataset({}, {\"x\": mindex})\n expected = DataArray(\n mindex.get_level_values(\"level_date\").hour,\n name=\"hour\",\n coords=[mindex],\n dims=\"x\",\n )\n assert_identical(expected, data[\"level_date.hour\"])\n\n # attribute style access\n assert_identical(data.level_str, data[\"level_str\"])\n\n def test_time_season(self):\n ds = Dataset({\"t\": pd.date_range(\"2000-01-01\", periods=12, freq=\"M\")})\n seas = [\"DJF\"] * 2 + [\"MAM\"] * 3 + [\"JJA\"] * 3 + [\"SON\"] * 3 + [\"DJF\"]\n assert_array_equal(seas, ds[\"t.season\"])\n\n def test_slice_virtual_variable(self):\n data = create_test_data()\n assert_equal(\n data[\"time.dayofyear\"][:10].variable, Variable([\"time\"], 1 + np.arange(10))\n )\n assert_equal(data[\"time.dayofyear\"][0].variable, Variable([], 1))\n\n def test_setitem(self):\n # assign a variable\n var = Variable([\"dim1\"], np.random.randn(8))\n data1 = create_test_data()\n data1[\"A\"] = var\n data2 = data1.copy()\n data2[\"A\"] = var\n assert_identical(data1, data2)\n # assign a dataset array\n dv = 2 * data2[\"A\"]\n data1[\"B\"] = dv.variable\n data2[\"B\"] = dv\n assert_identical(data1, data2)\n # can't assign an ND array without dimensions\n with raises_regex(ValueError, \"without explicit dimension names\"):\n data2[\"C\"] = var.values.reshape(2, 4)\n # but can assign a 1D array\n data1[\"C\"] = var.values\n data2[\"C\"] = (\"C\", var.values)\n assert_identical(data1, data2)\n # can assign a scalar\n data1[\"scalar\"] = 0\n data2[\"scalar\"] = ([], 0)\n assert_identical(data1, data2)\n # can't use the same dimension name as a scalar var\n with raises_regex(ValueError, \"already exists as a scalar\"):\n data1[\"newvar\"] = (\"scalar\", [3, 4, 5])\n # can't resize a used dimension\n with raises_regex(ValueError, \"arguments without labels\"):\n data1[\"dim1\"] = data1[\"dim1\"][:5]\n # override an existing value\n data1[\"A\"] = 3 * data2[\"A\"]\n assert_equal(data1[\"A\"], 3 * data2[\"A\"])\n\n with pytest.raises(NotImplementedError):\n 
data1[{\"x\": 0}] = 0\n\n def test_setitem_pandas(self):\n\n ds = self.make_example_math_dataset()\n ds[\"x\"] = np.arange(3)\n ds_copy = ds.copy()\n ds_copy[\"bar\"] = ds[\"bar\"].to_pandas()\n\n assert_equal(ds, ds_copy)\n\n def test_setitem_auto_align(self):\n ds = Dataset()\n ds[\"x\"] = (\"y\", range(3))\n ds[\"y\"] = 1 + np.arange(3)\n expected = Dataset({\"x\": (\"y\", range(3)), \"y\": 1 + np.arange(3)})\n assert_identical(ds, expected)\n\n ds[\"y\"] = DataArray(range(3), dims=\"y\")\n expected = Dataset({\"x\": (\"y\", range(3))}, {\"y\": range(3)})\n assert_identical(ds, expected)\n\n ds[\"x\"] = DataArray([1, 2], coords=[(\"y\", [0, 1])])\n expected = Dataset({\"x\": (\"y\", [1, 2, np.nan])}, {\"y\": range(3)})\n assert_identical(ds, expected)\n\n ds[\"x\"] = 42\n expected = Dataset({\"x\": 42, \"y\": range(3)})\n assert_identical(ds, expected)\n\n ds[\"x\"] = DataArray([4, 5, 6, 7], coords=[(\"y\", [0, 1, 2, 3])])\n expected = Dataset({\"x\": (\"y\", [4, 5, 6])}, {\"y\": range(3)})\n assert_identical(ds, expected)\n\n def test_setitem_dimension_override(self):\n # regression test for GH-3377\n ds = xr.Dataset({\"x\": [0, 1, 2]})\n ds[\"x\"] = ds[\"x\"][:2]\n expected = Dataset({\"x\": [0, 1]})\n assert_identical(ds, expected)\n\n ds = xr.Dataset({\"x\": [0, 1, 2]})\n ds[\"x\"] = np.array([0, 1])\n assert_identical(ds, expected)\n\n ds = xr.Dataset({\"x\": [0, 1, 2]})\n ds.coords[\"x\"] = [0, 1]\n assert_identical(ds, expected)\n\n def test_setitem_with_coords(self):\n # Regression test for GH:2068\n ds = create_test_data()\n\n other = DataArray(\n np.arange(10), dims=\"dim3\", coords={\"numbers\": (\"dim3\", np.arange(10))}\n )\n expected = ds.copy()\n expected[\"var3\"] = other.drop_vars(\"numbers\")\n actual = ds.copy()\n actual[\"var3\"] = other\n assert_identical(expected, actual)\n assert \"numbers\" in other.coords # should not change other\n\n # with alignment\n other = ds[\"var3\"].isel(dim3=slice(1, -1))\n other[\"numbers\"] = (\"dim3\", np.arange(8))\n actual = ds.copy()\n actual[\"var3\"] = other\n assert \"numbers\" in other.coords # should not change other\n expected = ds.copy()\n expected[\"var3\"] = ds[\"var3\"].isel(dim3=slice(1, -1))\n assert_identical(expected, actual)\n\n # with non-duplicate coords\n other = ds[\"var3\"].isel(dim3=slice(1, -1))\n other[\"numbers\"] = (\"dim3\", np.arange(8))\n other[\"position\"] = (\"dim3\", np.arange(8))\n actual = ds.copy()\n actual[\"var3\"] = other\n assert \"position\" in actual\n assert \"position\" in other.coords\n\n # assigning a coordinate-only dataarray\n actual = ds.copy()\n other = actual[\"numbers\"]\n other[0] = 10\n actual[\"numbers\"] = other\n assert actual[\"numbers\"][0] == 10\n\n # GH: 2099\n ds = Dataset(\n {\"var\": (\"x\", [1, 2, 3])},\n coords={\"x\": [0, 1, 2], \"z1\": (\"x\", [1, 2, 3]), \"z2\": (\"x\", [1, 2, 3])},\n )\n ds[\"var\"] = ds[\"var\"] * 2\n assert np.allclose(ds[\"var\"], [2, 4, 6])\n\n def test_setitem_align_new_indexes(self):\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [0, 1, 2]})\n ds[\"bar\"] = DataArray([2, 3, 4], [(\"x\", [1, 2, 3])])\n expected = Dataset(\n {\"foo\": (\"x\", [1, 2, 3]), \"bar\": (\"x\", [np.nan, 2, 3])}, {\"x\": [0, 1, 2]}\n )\n assert_identical(ds, expected)\n\n def test_assign(self):\n ds = Dataset()\n actual = ds.assign(x=[0, 1, 2], y=2)\n expected = Dataset({\"x\": [0, 1, 2], \"y\": 2})\n assert_identical(actual, expected)\n assert list(actual.variables) == [\"x\", \"y\"]\n assert_identical(ds, Dataset())\n\n actual = actual.assign(y=lambda ds: 
ds.x ** 2)\n expected = Dataset({\"y\": (\"x\", [0, 1, 4]), \"x\": [0, 1, 2]})\n assert_identical(actual, expected)\n\n actual = actual.assign_coords(z=2)\n expected = Dataset({\"y\": (\"x\", [0, 1, 4])}, {\"z\": 2, \"x\": [0, 1, 2]})\n assert_identical(actual, expected)\n\n ds = Dataset({\"a\": (\"x\", range(3))}, {\"b\": (\"x\", [\"A\"] * 2 + [\"B\"])})\n actual = ds.groupby(\"b\").assign(c=lambda ds: 2 * ds.a)\n expected = ds.merge({\"c\": (\"x\", [0, 2, 4])})\n assert_identical(actual, expected)\n\n actual = ds.groupby(\"b\").assign(c=lambda ds: ds.a.sum())\n expected = ds.merge({\"c\": (\"x\", [1, 1, 2])})\n assert_identical(actual, expected)\n\n actual = ds.groupby(\"b\").assign_coords(c=lambda ds: ds.a.sum())\n expected = expected.set_coords(\"c\")\n assert_identical(actual, expected)\n\n def test_assign_coords(self):\n ds = Dataset()\n\n actual = ds.assign(x=[0, 1, 2], y=2)\n actual = actual.assign_coords(x=list(\"abc\"))\n expected = Dataset({\"x\": list(\"abc\"), \"y\": 2})\n assert_identical(actual, expected)\n\n actual = ds.assign(x=[0, 1, 2], y=[2, 3])\n actual = actual.assign_coords({\"y\": [2.0, 3.0]})\n expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0])\n assert_identical(actual, expected)\n\n def test_assign_attrs(self):\n expected = Dataset(attrs=dict(a=1, b=2))\n new = Dataset()\n actual = new.assign_attrs(a=1, b=2)\n assert_identical(actual, expected)\n assert new.attrs == {}\n\n expected.attrs[\"c\"] = 3\n new_actual = actual.assign_attrs({\"c\": 3})\n assert_identical(new_actual, expected)\n assert actual.attrs == dict(a=1, b=2)\n\n def test_assign_multiindex_level(self):\n data = create_test_multiindex()\n with raises_regex(ValueError, \"conflicting MultiIndex\"):\n data.assign(level_1=range(4))\n data.assign_coords(level_1=range(4))\n # raise an Error when any level name is used as dimension GH:2299\n with pytest.raises(ValueError):\n data[\"y\"] = (\"level_1\", [0, 1])\n\n def test_merge_multiindex_level(self):\n data = create_test_multiindex()\n other = Dataset({\"z\": (\"level_1\", [0, 1])}) # conflict dimension\n with pytest.raises(ValueError):\n data.merge(other)\n other = Dataset({\"level_1\": (\"x\", [0, 1])}) # conflict variable name\n with pytest.raises(ValueError):\n data.merge(other)\n\n def test_setitem_original_non_unique_index(self):\n # regression test for GH943\n original = Dataset({\"data\": (\"x\", np.arange(5))}, coords={\"x\": [0, 1, 2, 0, 1]})\n expected = Dataset({\"data\": (\"x\", np.arange(5))}, {\"x\": range(5)})\n\n actual = original.copy()\n actual[\"x\"] = list(range(5))\n assert_identical(actual, expected)\n\n actual = original.copy()\n actual[\"x\"] = (\"x\", list(range(5)))\n assert_identical(actual, expected)\n\n actual = original.copy()\n actual.coords[\"x\"] = list(range(5))\n assert_identical(actual, expected)\n\n def test_setitem_both_non_unique_index(self):\n # regression test for GH956\n names = [\"joaquin\", \"manolo\", \"joaquin\"]\n values = np.random.randint(0, 256, (3, 4, 4))\n array = DataArray(\n values, dims=[\"name\", \"row\", \"column\"], coords=[names, range(4), range(4)]\n )\n expected = Dataset({\"first\": array, \"second\": array})\n actual = array.rename(\"first\").to_dataset()\n actual[\"second\"] = array\n assert_identical(expected, actual)\n\n def test_setitem_multiindex_level(self):\n data = create_test_multiindex()\n with raises_regex(ValueError, \"conflicting MultiIndex\"):\n data[\"level_1\"] = range(4)\n\n def test_delitem(self):\n data = create_test_data()\n all_items = set(data.variables)\n assert 
set(data.variables) == all_items\n del data[\"var1\"]\n assert set(data.variables) == all_items - {\"var1\"}\n del data[\"numbers\"]\n assert set(data.variables) == all_items - {\"var1\", \"numbers\"}\n assert \"numbers\" not in data.coords\n\n expected = Dataset()\n actual = Dataset({\"y\": (\"x\", [1, 2])})\n del actual[\"y\"]\n assert_identical(expected, actual)\n\n def test_squeeze(self):\n data = Dataset({\"foo\": ([\"x\", \"y\", \"z\"], [[[1], [2]]])})\n for args in [[], [[\"x\"]], [[\"x\", \"z\"]]]:\n\n def get_args(v):\n return [set(args[0]) & set(v.dims)] if args else []\n\n expected = Dataset(\n {k: v.squeeze(*get_args(v)) for k, v in data.variables.items()}\n )\n expected = expected.set_coords(data.coords)\n assert_identical(expected, data.squeeze(*args))\n # invalid squeeze\n with raises_regex(ValueError, \"cannot select a dimension\"):\n data.squeeze(\"y\")\n\n def test_squeeze_drop(self):\n data = Dataset({\"foo\": (\"x\", [1])}, {\"x\": [0]})\n expected = Dataset({\"foo\": 1})\n selected = data.squeeze(drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": 1}, {\"x\": 0})\n selected = data.squeeze(drop=False)\n assert_identical(expected, selected)\n\n data = Dataset({\"foo\": ((\"x\", \"y\"), [[1]])}, {\"x\": [0], \"y\": [0]})\n expected = Dataset({\"foo\": 1})\n selected = data.squeeze(drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": (\"x\", [1])}, {\"x\": [0]})\n selected = data.squeeze(dim=\"y\", drop=True)\n assert_identical(expected, selected)\n\n data = Dataset({\"foo\": ((\"x\",), [])}, {\"x\": []})\n selected = data.squeeze(drop=True)\n assert_identical(data, selected)\n\n def test_groupby(self):\n data = Dataset(\n {\"z\": ([\"x\", \"y\"], np.random.randn(3, 5))},\n {\"x\": (\"x\", list(\"abc\")), \"c\": (\"x\", [0, 1, 0]), \"y\": range(5)},\n )\n groupby = data.groupby(\"x\")\n assert len(groupby) == 3\n expected_groups = {\"a\": 0, \"b\": 1, \"c\": 2}\n assert groupby.groups == expected_groups\n expected_items = [\n (\"a\", data.isel(x=0)),\n (\"b\", data.isel(x=1)),\n (\"c\", data.isel(x=2)),\n ]\n for actual, expected in zip(groupby, expected_items):\n assert actual[0] == expected[0]\n assert_equal(actual[1], expected[1])\n\n def identity(x):\n return x\n\n for k in [\"x\", \"c\", \"y\"]:\n actual = data.groupby(k, squeeze=False).map(identity)\n assert_equal(data, actual)\n\n def test_groupby_returns_new_type(self):\n data = Dataset({\"z\": ([\"x\", \"y\"], np.random.randn(3, 5))})\n\n actual = data.groupby(\"x\").map(lambda ds: ds[\"z\"])\n expected = data[\"z\"]\n assert_identical(expected, actual)\n\n actual = data[\"z\"].groupby(\"x\").map(lambda x: x.to_dataset())\n expected = data\n assert_identical(expected, actual)\n\n def test_groupby_iter(self):\n data = create_test_data()\n for n, (t, sub) in enumerate(list(data.groupby(\"dim1\"))[:3]):\n assert data[\"dim1\"][n] == t\n assert_equal(data[\"var1\"][n], sub[\"var1\"])\n assert_equal(data[\"var2\"][n], sub[\"var2\"])\n assert_equal(data[\"var3\"][:, n], sub[\"var3\"])\n\n def test_groupby_errors(self):\n data = create_test_data()\n with raises_regex(TypeError, \"`group` must be\"):\n data.groupby(np.arange(10))\n with raises_regex(ValueError, \"length does not match\"):\n data.groupby(data[\"dim1\"][:3])\n with raises_regex(TypeError, \"`group` must be\"):\n data.groupby(data.coords[\"dim1\"].to_index())\n\n def test_groupby_reduce(self):\n data = Dataset(\n {\n \"xy\": ([\"x\", \"y\"], np.random.randn(3, 4)),\n \"xonly\": (\"x\", 
np.random.randn(3)),\n \"yonly\": (\"y\", np.random.randn(4)),\n \"letters\": (\"y\", [\"a\", \"a\", \"b\", \"b\"]),\n }\n )\n\n expected = data.mean(\"y\")\n expected[\"yonly\"] = expected[\"yonly\"].variable.set_dims({\"x\": 3})\n actual = data.groupby(\"x\").mean(...)\n assert_allclose(expected, actual)\n\n actual = data.groupby(\"x\").mean(\"y\")\n assert_allclose(expected, actual)\n\n letters = data[\"letters\"]\n expected = Dataset(\n {\n \"xy\": data[\"xy\"].groupby(letters).mean(...),\n \"xonly\": (data[\"xonly\"].mean().variable.set_dims({\"letters\": 2})),\n \"yonly\": data[\"yonly\"].groupby(letters).mean(),\n }\n )\n actual = data.groupby(\"letters\").mean(...)\n assert_allclose(expected, actual)\n\n def test_groupby_math(self):\n def reorder_dims(x):\n return x.transpose(\"dim1\", \"dim2\", \"dim3\", \"time\")\n\n ds = create_test_data()\n ds[\"dim1\"] = ds[\"dim1\"]\n for squeeze in [True, False]:\n grouped = ds.groupby(\"dim1\", squeeze=squeeze)\n\n expected = reorder_dims(ds + ds.coords[\"dim1\"])\n actual = grouped + ds.coords[\"dim1\"]\n assert_identical(expected, reorder_dims(actual))\n\n actual = ds.coords[\"dim1\"] + grouped\n assert_identical(expected, reorder_dims(actual))\n\n ds2 = 2 * ds\n expected = reorder_dims(ds + ds2)\n actual = grouped + ds2\n assert_identical(expected, reorder_dims(actual))\n\n actual = ds2 + grouped\n assert_identical(expected, reorder_dims(actual))\n\n grouped = ds.groupby(\"numbers\")\n zeros = DataArray([0, 0, 0, 0], [(\"numbers\", range(4))])\n expected = (ds + Variable(\"dim3\", np.zeros(10))).transpose(\n \"dim3\", \"dim1\", \"dim2\", \"time\"\n )\n actual = grouped + zeros\n assert_equal(expected, actual)\n\n actual = zeros + grouped\n assert_equal(expected, actual)\n\n with raises_regex(ValueError, \"incompat.* grouped binary\"):\n grouped + ds\n with raises_regex(ValueError, \"incompat.* grouped binary\"):\n ds + grouped\n with raises_regex(TypeError, \"only support binary ops\"):\n grouped + 1\n with raises_regex(TypeError, \"only support binary ops\"):\n grouped + grouped\n with raises_regex(TypeError, \"in-place operations\"):\n ds += grouped\n\n ds = Dataset(\n {\n \"x\": (\"time\", np.arange(100)),\n \"time\": pd.date_range(\"2000-01-01\", periods=100),\n }\n )\n with raises_regex(ValueError, \"incompat.* grouped binary\"):\n ds + ds.groupby(\"time.month\")\n\n def test_groupby_math_virtual(self):\n ds = Dataset(\n {\"x\": (\"t\", [1, 2, 3])}, {\"t\": pd.date_range(\"20100101\", periods=3)}\n )\n grouped = ds.groupby(\"t.day\")\n actual = grouped - grouped.mean(...)\n expected = Dataset({\"x\": (\"t\", [0, 0, 0])}, ds[[\"t\", \"t.day\"]])\n assert_identical(actual, expected)\n\n def test_groupby_nan(self):\n # nan should be excluded from groupby\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3, 4])}, {\"bar\": (\"x\", [1, 1, 2, np.nan])})\n actual = ds.groupby(\"bar\").mean(...)\n expected = Dataset({\"foo\": (\"bar\", [1.5, 3]), \"bar\": [1, 2]})\n assert_identical(actual, expected)\n\n def test_groupby_order(self):\n # groupby should preserve variables order\n ds = Dataset()\n for vn in [\"a\", \"b\", \"c\"]:\n ds[vn] = DataArray(np.arange(10), dims=[\"t\"])\n data_vars_ref = list(ds.data_vars.keys())\n ds = ds.groupby(\"t\").mean(...)\n data_vars = list(ds.data_vars.keys())\n assert data_vars == data_vars_ref\n # coords are now at the end of the list, so the test below fails\n # all_vars = list(ds.variables.keys())\n # all_vars_ref = list(ds.variables.keys())\n # self.assertEqual(all_vars, all_vars_ref)\n\n def 
test_resample_and_first(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n\n actual = ds.resample(time=\"1D\").first(keep_attrs=True)\n expected = ds.isel(time=[0, 4, 8])\n assert_identical(expected, actual)\n\n # upsampling\n expected_time = pd.date_range(\"2000-01-01\", freq=\"3H\", periods=19)\n expected = ds.reindex(time=expected_time)\n actual = ds.resample(time=\"3H\")\n for how in [\"mean\", \"sum\", \"first\", \"last\"]:\n method = getattr(actual, how)\n result = method()\n assert_equal(expected, result)\n for method in [np.mean]:\n result = actual.reduce(method)\n assert_equal(expected, result)\n\n def test_resample_min_count(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n # inject nan\n ds[\"foo\"] = xr.where(ds[\"foo\"] > 2.0, np.nan, ds[\"foo\"])\n\n actual = ds.resample(time=\"1D\").sum(min_count=1)\n expected = xr.concat(\n [\n ds.isel(time=slice(i * 4, (i + 1) * 4)).sum(\"time\", min_count=1)\n for i in range(3)\n ],\n dim=actual[\"time\"],\n )\n assert_equal(expected, actual)\n\n def test_resample_by_mean_with_keep_attrs(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n resampled_ds = ds.resample(time=\"1D\").mean(keep_attrs=True)\n actual = resampled_ds[\"bar\"].attrs\n expected = ds[\"bar\"].attrs\n assert expected == actual\n\n actual = resampled_ds.attrs\n expected = ds.attrs\n assert expected == actual\n\n def test_resample_loffset(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n actual = ds.resample(time=\"24H\", loffset=\"-12H\").mean(\"time\").time\n expected = xr.DataArray(\n ds.bar.to_series().resample(\"24H\", loffset=\"-12H\").mean()\n ).time\n assert_identical(expected, actual)\n\n def test_resample_by_mean_discarding_attrs(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n resampled_ds = ds.resample(time=\"1D\").mean(keep_attrs=False)\n\n assert resampled_ds[\"bar\"].attrs == {}\n assert resampled_ds.attrs == {}\n\n def test_resample_by_last_discarding_attrs(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n resampled_ds = ds.resample(time=\"1D\").last(keep_attrs=False)\n\n assert resampled_ds[\"bar\"].attrs == {}\n assert resampled_ds.attrs == {}\n\n @requires_scipy\n def test_resample_drop_nondim_coords(self):\n xs 
= np.arange(6)\n ys = np.arange(3)\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=5)\n data = np.tile(np.arange(5), (6, 3, 1))\n xx, yy = np.meshgrid(xs * 5, ys * 2.5)\n tt = np.arange(len(times), dtype=int)\n array = DataArray(data, {\"time\": times, \"x\": xs, \"y\": ys}, (\"x\", \"y\", \"time\"))\n xcoord = DataArray(xx.T, {\"x\": xs, \"y\": ys}, (\"x\", \"y\"))\n ycoord = DataArray(yy.T, {\"x\": xs, \"y\": ys}, (\"x\", \"y\"))\n tcoord = DataArray(tt, {\"time\": times}, (\"time\",))\n ds = Dataset({\"data\": array, \"xc\": xcoord, \"yc\": ycoord, \"tc\": tcoord})\n ds = ds.set_coords([\"xc\", \"yc\", \"tc\"])\n\n # Re-sample\n actual = ds.resample(time=\"12H\").mean(\"time\")\n assert \"tc\" not in actual.coords\n\n # Up-sample - filling\n actual = ds.resample(time=\"1H\").ffill()\n assert \"tc\" not in actual.coords\n\n # Up-sample - interpolation\n actual = ds.resample(time=\"1H\").interpolate(\"linear\")\n assert \"tc\" not in actual.coords\n\n def test_resample_old_api(self):\n\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n\n with raises_regex(TypeError, r\"resample\\(\\) no longer supports\"):\n ds.resample(\"1D\", \"time\")\n\n with raises_regex(TypeError, r\"resample\\(\\) no longer supports\"):\n ds.resample(\"1D\", dim=\"time\", how=\"mean\")\n\n with raises_regex(TypeError, r\"resample\\(\\) no longer supports\"):\n ds.resample(\"1D\", dim=\"time\")\n\n def test_resample_ds_da_are_the_same(self):\n time = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=365 * 4)\n ds = xr.Dataset(\n {\n \"foo\": ((\"time\", \"x\"), np.random.randn(365 * 4, 5)),\n \"time\": time,\n \"x\": np.arange(5),\n }\n )\n assert_identical(\n ds.resample(time=\"M\").mean()[\"foo\"], ds.foo.resample(time=\"M\").mean()\n )\n\n def test_ds_resample_apply_func_args(self):\n def func(arg1, arg2, arg3=0.0):\n return arg1.mean(\"time\") + arg2 + arg3\n\n times = pd.date_range(\"2000\", freq=\"D\", periods=3)\n ds = xr.Dataset({\"foo\": (\"time\", [1.0, 1.0, 1.0]), \"time\": times})\n expected = xr.Dataset({\"foo\": (\"time\", [3.0, 3.0, 3.0]), \"time\": times})\n actual = ds.resample(time=\"D\").map(func, args=(1.0,), arg3=1.0)\n assert_identical(expected, actual)\n\n def test_to_array(self):\n ds = Dataset(\n {\"a\": 1, \"b\": (\"x\", [1, 2, 3])},\n coords={\"c\": 42},\n attrs={\"Conventions\": \"None\"},\n )\n data = [[1, 1, 1], [1, 2, 3]]\n coords = {\"c\": 42, \"variable\": [\"a\", \"b\"]}\n dims = (\"variable\", \"x\")\n expected = DataArray(data, coords, dims, attrs=ds.attrs)\n actual = ds.to_array()\n assert_identical(expected, actual)\n\n actual = ds.to_array(\"abc\", name=\"foo\")\n expected = expected.rename({\"variable\": \"abc\"}).rename(\"foo\")\n assert_identical(expected, actual)\n\n def test_to_and_from_dataframe(self):\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n ds = Dataset({\"a\": (\"t\", x), \"b\": (\"t\", y), \"t\": (\"t\", t)})\n expected = pd.DataFrame(\n np.array([x, y]).T, columns=[\"a\", \"b\"], index=pd.Index(t, name=\"t\")\n )\n actual = ds.to_dataframe()\n # use the .equals method to check all DataFrame metadata\n assert expected.equals(actual), (expected, actual)\n\n # verify coords are included\n actual = ds.set_coords(\"b\").to_dataframe()\n assert expected.equals(actual), (expected, actual)\n\n # check roundtrip\n 
assert_identical(ds, Dataset.from_dataframe(actual))\n\n # test a case with a MultiIndex\n w = np.random.randn(2, 3)\n ds = Dataset({\"w\": ((\"x\", \"y\"), w)})\n ds[\"y\"] = (\"y\", list(\"abc\"))\n exp_index = pd.MultiIndex.from_arrays(\n [[0, 0, 0, 1, 1, 1], [\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"]], names=[\"x\", \"y\"]\n )\n expected = pd.DataFrame(w.reshape(-1), columns=[\"w\"], index=exp_index)\n actual = ds.to_dataframe()\n assert expected.equals(actual)\n\n # check roundtrip\n assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual))\n\n # Check multiindex reordering\n new_order = [\"x\", \"y\"]\n actual = ds.to_dataframe(dim_order=new_order)\n assert expected.equals(actual)\n\n new_order = [\"y\", \"x\"]\n exp_index = pd.MultiIndex.from_arrays(\n [[\"a\", \"a\", \"b\", \"b\", \"c\", \"c\"], [0, 1, 0, 1, 0, 1]], names=[\"y\", \"x\"]\n )\n expected = pd.DataFrame(\n w.transpose().reshape(-1), columns=[\"w\"], index=exp_index\n )\n actual = ds.to_dataframe(dim_order=new_order)\n assert expected.equals(actual)\n\n invalid_order = [\"x\"]\n with pytest.raises(\n ValueError, match=\"does not match the set of dimensions of this\"\n ):\n ds.to_dataframe(dim_order=invalid_order)\n\n invalid_order = [\"x\", \"z\"]\n with pytest.raises(\n ValueError, match=\"does not match the set of dimensions of this\"\n ):\n ds.to_dataframe(dim_order=invalid_order)\n\n # check pathological cases\n df = pd.DataFrame([1])\n actual = Dataset.from_dataframe(df)\n expected = Dataset({0: (\"index\", [1])}, {\"index\": [0]})\n assert_identical(expected, actual)\n\n df = pd.DataFrame()\n actual = Dataset.from_dataframe(df)\n expected = Dataset(coords={\"index\": []})\n assert_identical(expected, actual)\n\n # GH697\n df = pd.DataFrame({\"A\": []})\n actual = Dataset.from_dataframe(df)\n expected = Dataset({\"A\": DataArray([], dims=(\"index\",))}, {\"index\": []})\n assert_identical(expected, actual)\n\n # regression test for GH278\n # use int64 to ensure consistent results for the pandas .equals method\n # on windows (which requires the same dtype)\n ds = Dataset({\"x\": pd.Index([\"bar\"]), \"a\": (\"y\", np.array([1], \"int64\"))}).isel(\n x=0\n )\n # use .loc to ensure consistent results on Python 3\n actual = ds.to_dataframe().loc[:, [\"a\", \"x\"]]\n expected = pd.DataFrame(\n [[1, \"bar\"]], index=pd.Index([0], name=\"y\"), columns=[\"a\", \"x\"]\n )\n assert expected.equals(actual), (expected, actual)\n\n ds = Dataset({\"x\": np.array([0], \"int64\"), \"y\": np.array([1], \"int64\")})\n actual = ds.to_dataframe()\n idx = pd.MultiIndex.from_arrays([[0], [1]], names=[\"x\", \"y\"])\n expected = pd.DataFrame([[]], index=idx)\n assert expected.equals(actual), (expected, actual)\n\n def test_from_dataframe_categorical(self):\n cat = pd.CategoricalDtype(\n categories=[\"foo\", \"bar\", \"baz\", \"qux\", \"quux\", \"corge\"]\n )\n i1 = pd.Series([\"foo\", \"bar\", \"foo\"], dtype=cat)\n i2 = pd.Series([\"bar\", \"bar\", \"baz\"], dtype=cat)\n\n df = pd.DataFrame({\"i1\": i1, \"i2\": i2, \"values\": [1, 2, 3]})\n ds = df.set_index(\"i1\").to_xarray()\n assert len(ds[\"i1\"]) == 3\n\n ds = df.set_index([\"i1\", \"i2\"]).to_xarray()\n assert len(ds[\"i1\"]) == 2\n assert len(ds[\"i2\"]) == 2\n\n @requires_sparse\n def test_from_dataframe_sparse(self):\n import sparse\n\n df_base = pd.DataFrame(\n {\"x\": range(10), \"y\": list(\"abcdefghij\"), \"z\": np.arange(0, 100, 10)}\n )\n\n ds_sparse = Dataset.from_dataframe(df_base.set_index(\"x\"), sparse=True)\n ds_dense = 
Dataset.from_dataframe(df_base.set_index(\"x\"), sparse=False)\n assert isinstance(ds_sparse[\"y\"].data, sparse.COO)\n assert isinstance(ds_sparse[\"z\"].data, sparse.COO)\n ds_sparse[\"y\"].data = ds_sparse[\"y\"].data.todense()\n ds_sparse[\"z\"].data = ds_sparse[\"z\"].data.todense()\n assert_identical(ds_dense, ds_sparse)\n\n ds_sparse = Dataset.from_dataframe(df_base.set_index([\"x\", \"y\"]), sparse=True)\n ds_dense = Dataset.from_dataframe(df_base.set_index([\"x\", \"y\"]), sparse=False)\n assert isinstance(ds_sparse[\"z\"].data, sparse.COO)\n ds_sparse[\"z\"].data = ds_sparse[\"z\"].data.todense()\n assert_identical(ds_dense, ds_sparse)\n\n def test_to_and_from_empty_dataframe(self):\n # GH697\n expected = pd.DataFrame({\"foo\": []})\n ds = Dataset.from_dataframe(expected)\n assert len(ds[\"foo\"]) == 0\n actual = ds.to_dataframe()\n assert len(actual) == 0\n assert expected.equals(actual)\n\n def test_from_dataframe_multiindex(self):\n index = pd.MultiIndex.from_product([[\"a\", \"b\"], [1, 2, 3]], names=[\"x\", \"y\"])\n df = pd.DataFrame({\"z\": np.arange(6)}, index=index)\n\n expected = Dataset(\n {\"z\": ((\"x\", \"y\"), [[0, 1, 2], [3, 4, 5]])},\n coords={\"x\": [\"a\", \"b\"], \"y\": [1, 2, 3]},\n )\n actual = Dataset.from_dataframe(df)\n assert_identical(actual, expected)\n\n df2 = df.iloc[[3, 2, 1, 0, 4, 5], :]\n actual = Dataset.from_dataframe(df2)\n assert_identical(actual, expected)\n\n df3 = df.iloc[:4, :]\n expected3 = Dataset(\n {\"z\": ((\"x\", \"y\"), [[0, 1, 2], [3, np.nan, np.nan]])},\n coords={\"x\": [\"a\", \"b\"], \"y\": [1, 2, 3]},\n )\n actual = Dataset.from_dataframe(df3)\n assert_identical(actual, expected3)\n\n df_nonunique = df.iloc[[0, 0], :]\n with raises_regex(ValueError, \"non-unique MultiIndex\"):\n Dataset.from_dataframe(df_nonunique)\n\n def test_from_dataframe_unsorted_levels(self):\n # regression test for GH-4186\n index = pd.MultiIndex(\n levels=[[\"b\", \"a\"], [\"foo\"]], codes=[[0, 1], [0, 0]], names=[\"lev1\", \"lev2\"]\n )\n df = pd.DataFrame({\"c1\": [0, 2], \"c2\": [1, 3]}, index=index)\n expected = Dataset(\n {\n \"c1\": ((\"lev1\", \"lev2\"), [[0], [2]]),\n \"c2\": ((\"lev1\", \"lev2\"), [[1], [3]]),\n },\n coords={\"lev1\": [\"b\", \"a\"], \"lev2\": [\"foo\"]},\n )\n actual = Dataset.from_dataframe(df)\n assert_identical(actual, expected)\n\n def test_from_dataframe_non_unique_columns(self):\n # regression test for GH449\n df = pd.DataFrame(np.zeros((2, 2)))\n df.columns = [\"foo\", \"foo\"]\n with raises_regex(ValueError, \"non-unique columns\"):\n Dataset.from_dataframe(df)\n\n def test_convert_dataframe_with_many_types_and_multiindex(self):\n # regression test for GH737\n df = pd.DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n }\n )\n df.index = pd.MultiIndex.from_product([[\"a\"], range(3)], names=[\"one\", \"two\"])\n roundtripped = Dataset.from_dataframe(df).to_dataframe()\n # we can't do perfectly, but we should be at least as faithful as\n # np.asarray\n expected = df.apply(np.asarray)\n assert roundtripped.equals(expected)\n\n def test_to_and_from_dict(self):\n # <xarray.Dataset>\n # Dimensions: (t: 10)\n # Coordinates:\n # * t (t) <U1 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'\n # Data variables:\n # a (t) float64 0.6916 -1.056 -1.163 
0.9792 -0.7865 ...\n # b (t) float64 1.32 0.1954 1.91 1.39 0.519 -0.2772 ...\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n ds = Dataset({\"a\": (\"t\", x), \"b\": (\"t\", y), \"t\": (\"t\", t)})\n expected = {\n \"coords\": {\"t\": {\"dims\": (\"t\",), \"data\": t, \"attrs\": {}}},\n \"attrs\": {},\n \"dims\": {\"t\": 10},\n \"data_vars\": {\n \"a\": {\"dims\": (\"t\",), \"data\": x.tolist(), \"attrs\": {}},\n \"b\": {\"dims\": (\"t\",), \"data\": y.tolist(), \"attrs\": {}},\n },\n }\n\n actual = ds.to_dict()\n\n # check that they are identical\n assert expected == actual\n\n # check roundtrip\n assert_identical(ds, Dataset.from_dict(actual))\n\n # check the data=False option\n expected_no_data = expected.copy()\n del expected_no_data[\"coords\"][\"t\"][\"data\"]\n del expected_no_data[\"data_vars\"][\"a\"][\"data\"]\n del expected_no_data[\"data_vars\"][\"b\"][\"data\"]\n endiantype = \"<U1\" if sys.byteorder == \"little\" else \">U1\"\n expected_no_data[\"coords\"][\"t\"].update({\"dtype\": endiantype, \"shape\": (10,)})\n expected_no_data[\"data_vars\"][\"a\"].update({\"dtype\": \"float64\", \"shape\": (10,)})\n expected_no_data[\"data_vars\"][\"b\"].update({\"dtype\": \"float64\", \"shape\": (10,)})\n actual_no_data = ds.to_dict(data=False)\n assert expected_no_data == actual_no_data\n\n # verify coords are included roundtrip\n expected_ds = ds.set_coords(\"b\")\n actual = Dataset.from_dict(expected_ds.to_dict())\n\n assert_identical(expected_ds, actual)\n\n # test some incomplete dicts:\n # this one has no attrs field, the dims are strings, and x, y are\n # np.arrays\n\n d = {\n \"coords\": {\"t\": {\"dims\": \"t\", \"data\": t}},\n \"dims\": \"t\",\n \"data_vars\": {\"a\": {\"dims\": \"t\", \"data\": x}, \"b\": {\"dims\": \"t\", \"data\": y}},\n }\n assert_identical(ds, Dataset.from_dict(d))\n\n # this is kind of a flattened version with no coords, or data_vars\n d = {\n \"a\": {\"dims\": \"t\", \"data\": x},\n \"t\": {\"data\": t, \"dims\": \"t\"},\n \"b\": {\"dims\": \"t\", \"data\": y},\n }\n assert_identical(ds, Dataset.from_dict(d))\n\n # this one is missing some necessary information\n d = {\n \"a\": {\"data\": x},\n \"t\": {\"data\": t, \"dims\": \"t\"},\n \"b\": {\"dims\": \"t\", \"data\": y},\n }\n with raises_regex(ValueError, \"cannot convert dict without the key 'dims'\"):\n Dataset.from_dict(d)\n\n def test_to_and_from_dict_with_time_dim(self):\n x = np.random.randn(10, 3)\n y = np.random.randn(10, 3)\n t = pd.date_range(\"20130101\", periods=10)\n lat = [77.7, 83.2, 76]\n ds = Dataset(\n {\n \"a\": ([\"t\", \"lat\"], x),\n \"b\": ([\"t\", \"lat\"], y),\n \"t\": (\"t\", t),\n \"lat\": (\"lat\", lat),\n }\n )\n roundtripped = Dataset.from_dict(ds.to_dict())\n assert_identical(ds, roundtripped)\n\n def test_to_and_from_dict_with_nan_nat(self):\n x = np.random.randn(10, 3)\n y = np.random.randn(10, 3)\n y[2] = np.nan\n t = pd.Series(pd.date_range(\"20130101\", periods=10))\n t[2] = np.nan\n\n lat = [77.7, 83.2, 76]\n ds = Dataset(\n {\n \"a\": ([\"t\", \"lat\"], x),\n \"b\": ([\"t\", \"lat\"], y),\n \"t\": (\"t\", t),\n \"lat\": (\"lat\", lat),\n }\n )\n roundtripped = Dataset.from_dict(ds.to_dict())\n assert_identical(ds, roundtripped)\n\n def test_to_dict_with_numpy_attrs(self):\n # this doesn't need to roundtrip\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n attrs = {\n \"created\": np.float64(1998),\n \"coords\": np.array([37, -110.1, 100]),\n \"maintainer\": \"bar\",\n }\n ds = Dataset({\"a\": (\"t\", 
x, attrs), \"b\": (\"t\", y, attrs), \"t\": (\"t\", t)})\n expected_attrs = {\n \"created\": attrs[\"created\"].item(),\n \"coords\": attrs[\"coords\"].tolist(),\n \"maintainer\": \"bar\",\n }\n actual = ds.to_dict()\n\n # check that they are identical\n assert expected_attrs == actual[\"data_vars\"][\"a\"][\"attrs\"]\n\n def test_pickle(self):\n data = create_test_data()\n roundtripped = pickle.loads(pickle.dumps(data))\n assert_identical(data, roundtripped)\n # regression test for #167:\n assert data.dims == roundtripped.dims\n\n def test_lazy_load(self):\n store = InaccessibleVariableDataStore()\n create_test_data().dump_to_store(store)\n\n for decode_cf in [True, False]:\n ds = open_dataset(store, decode_cf=decode_cf)\n with pytest.raises(UnexpectedDataAccess):\n ds.load()\n with pytest.raises(UnexpectedDataAccess):\n ds[\"var1\"].values\n\n # these should not raise UnexpectedDataAccess:\n ds.isel(time=10)\n ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)\n\n def test_dropna(self):\n x = np.random.randn(4, 4)\n x[::2, 0] = np.nan\n y = np.random.randn(4)\n y[-1] = np.nan\n ds = Dataset({\"foo\": ((\"a\", \"b\"), x), \"bar\": ((\"b\", y))})\n\n expected = ds.isel(a=slice(1, None, 2))\n actual = ds.dropna(\"a\")\n assert_identical(actual, expected)\n\n expected = ds.isel(b=slice(1, 3))\n actual = ds.dropna(\"b\")\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"b\", subset=[\"foo\", \"bar\"])\n assert_identical(actual, expected)\n\n expected = ds.isel(b=slice(1, None))\n actual = ds.dropna(\"b\", subset=[\"foo\"])\n assert_identical(actual, expected)\n\n expected = ds.isel(b=slice(3))\n actual = ds.dropna(\"b\", subset=[\"bar\"])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"a\", subset=[])\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"a\", subset=[\"bar\"])\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"a\", how=\"all\")\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"b\", how=\"all\", subset=[\"bar\"])\n expected = ds.isel(b=[0, 1, 2])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"b\", thresh=1, subset=[\"bar\"])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"b\", thresh=2)\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"b\", thresh=4)\n expected = ds.isel(b=[1, 2, 3])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"a\", thresh=3)\n expected = ds.isel(a=[1, 3])\n assert_identical(actual, ds)\n\n with raises_regex(ValueError, \"a single dataset dimension\"):\n ds.dropna(\"foo\")\n with raises_regex(ValueError, \"invalid how\"):\n ds.dropna(\"a\", how=\"somehow\")\n with raises_regex(TypeError, \"must specify how or thresh\"):\n ds.dropna(\"a\", how=None)\n\n def test_fillna(self):\n ds = Dataset({\"a\": (\"x\", [np.nan, 1, np.nan, 3])}, {\"x\": [0, 1, 2, 3]})\n\n # fill with -1\n actual = ds.fillna(-1)\n expected = Dataset({\"a\": (\"x\", [-1, 1, -1, 3])}, {\"x\": [0, 1, 2, 3]})\n assert_identical(expected, actual)\n\n actual = ds.fillna({\"a\": -1})\n assert_identical(expected, actual)\n\n other = Dataset({\"a\": -1})\n actual = ds.fillna(other)\n assert_identical(expected, actual)\n\n actual = ds.fillna({\"a\": other.a})\n assert_identical(expected, actual)\n\n # fill with range(4)\n b = DataArray(range(4), coords=[(\"x\", range(4))])\n actual = ds.fillna(b)\n expected = b.rename(\"a\").to_dataset()\n assert_identical(expected, actual)\n\n actual = ds.fillna(expected)\n assert_identical(expected, actual)\n\n actual = ds.fillna(range(4))\n assert_identical(expected, 
actual)\n\n actual = ds.fillna(b[:3])\n assert_identical(expected, actual)\n\n # okay to only include some data variables\n ds[\"b\"] = np.nan\n actual = ds.fillna({\"a\": -1})\n expected = Dataset(\n {\"a\": (\"x\", [-1, 1, -1, 3]), \"b\": np.nan}, {\"x\": [0, 1, 2, 3]}\n )\n assert_identical(expected, actual)\n\n # but new data variables is not okay\n with raises_regex(ValueError, \"must be contained\"):\n ds.fillna({\"x\": 0})\n\n # empty argument should be OK\n result = ds.fillna({})\n assert_identical(ds, result)\n\n result = ds.fillna(Dataset(coords={\"c\": 42}))\n expected = ds.assign_coords(c=42)\n assert_identical(expected, result)\n\n # groupby\n expected = Dataset({\"a\": (\"x\", range(4))}, {\"x\": [0, 1, 2, 3]})\n for target in [ds, expected]:\n target.coords[\"b\"] = (\"x\", [0, 0, 1, 1])\n actual = ds.groupby(\"b\").fillna(DataArray([0, 2], dims=\"b\"))\n assert_identical(expected, actual)\n\n actual = ds.groupby(\"b\").fillna(Dataset({\"a\": (\"b\", [0, 2])}))\n assert_identical(expected, actual)\n\n # attrs with groupby\n ds.attrs[\"attr\"] = \"ds\"\n ds.a.attrs[\"attr\"] = \"da\"\n actual = ds.groupby(\"b\").fillna(Dataset({\"a\": (\"b\", [0, 2])}))\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n da = DataArray(range(5), name=\"a\", attrs={\"attr\": \"da\"})\n actual = da.fillna(1)\n assert actual.name == \"a\"\n assert actual.attrs == da.attrs\n\n ds = Dataset({\"a\": da}, attrs={\"attr\": \"ds\"})\n actual = ds.fillna({\"a\": 1})\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n def test_where(self):\n ds = Dataset({\"a\": (\"x\", range(5))})\n expected = Dataset({\"a\": (\"x\", [np.nan, np.nan, 2, 3, 4])})\n actual = ds.where(ds > 1)\n assert_identical(expected, actual)\n\n actual = ds.where(ds.a > 1)\n assert_identical(expected, actual)\n\n actual = ds.where(ds.a.values > 1)\n assert_identical(expected, actual)\n\n actual = ds.where(True)\n assert_identical(ds, actual)\n\n expected = ds.copy(deep=True)\n expected[\"a\"].values = [np.nan] * 5\n actual = ds.where(False)\n assert_identical(expected, actual)\n\n # 2d\n ds = Dataset({\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])})\n expected = Dataset({\"a\": ((\"x\", \"y\"), [[np.nan, 1], [2, 3]])})\n actual = ds.where(ds > 0)\n assert_identical(expected, actual)\n\n # groupby\n ds = Dataset({\"a\": (\"x\", range(5))}, {\"c\": (\"x\", [0, 0, 1, 1, 1])})\n cond = Dataset({\"a\": (\"c\", [True, False])})\n expected = ds.copy(deep=True)\n expected[\"a\"].values = [0, 1] + [np.nan] * 3\n actual = ds.groupby(\"c\").where(cond)\n assert_identical(expected, actual)\n\n # attrs with groupby\n ds.attrs[\"attr\"] = \"ds\"\n ds.a.attrs[\"attr\"] = \"da\"\n actual = ds.groupby(\"c\").where(cond)\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n # attrs\n da = DataArray(range(5), name=\"a\", attrs={\"attr\": \"da\"})\n actual = da.where(da.values > 1)\n assert actual.name == \"a\"\n assert actual.attrs == da.attrs\n\n ds = Dataset({\"a\": da}, attrs={\"attr\": \"ds\"})\n actual = ds.where(ds > 0)\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n # lambda\n ds = Dataset({\"a\": (\"x\", range(5))})\n expected = Dataset({\"a\": (\"x\", [np.nan, np.nan, 2, 3, 4])})\n actual = ds.where(lambda x: x > 1)\n assert_identical(expected, actual)\n\n def test_where_other(self):\n ds = Dataset({\"a\": (\"x\", range(5))}, 
{\"x\": range(5)})\n expected = Dataset({\"a\": (\"x\", [-1, -1, 2, 3, 4])}, {\"x\": range(5)})\n actual = ds.where(ds > 1, -1)\n assert_equal(expected, actual)\n assert actual.a.dtype == int\n\n actual = ds.where(lambda x: x > 1, -1)\n assert_equal(expected, actual)\n\n with raises_regex(ValueError, \"cannot set\"):\n ds.where(ds > 1, other=0, drop=True)\n\n with raises_regex(ValueError, \"indexes .* are not equal\"):\n ds.where(ds > 1, ds.isel(x=slice(3)))\n\n with raises_regex(ValueError, \"exact match required\"):\n ds.where(ds > 1, ds.assign(b=2))\n\n def test_where_drop(self):\n # if drop=True\n\n # 1d\n # data array case\n array = DataArray(range(5), coords=[range(5)], dims=[\"x\"])\n expected = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=[\"x\"])\n actual = array.where(array > 1, drop=True)\n assert_identical(expected, actual)\n\n # dataset case\n ds = Dataset({\"a\": array})\n expected = Dataset({\"a\": expected})\n\n actual = ds.where(ds > 1, drop=True)\n assert_identical(expected, actual)\n\n actual = ds.where(ds.a > 1, drop=True)\n assert_identical(expected, actual)\n\n with raises_regex(TypeError, \"must be a\"):\n ds.where(np.arange(5) > 1, drop=True)\n\n # 1d with odd coordinates\n array = DataArray(\n np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=[\"x\"]\n )\n expected = DataArray(\n np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=[\"x\"]\n )\n actual = array.where(array > 2, drop=True)\n assert_identical(expected, actual)\n\n # 1d multiple variables\n ds = Dataset({\"a\": ((\"x\"), [0, 1, 2, 3]), \"b\": ((\"x\"), [4, 5, 6, 7])})\n expected = Dataset(\n {\"a\": ((\"x\"), [np.nan, 1, 2, 3]), \"b\": ((\"x\"), [4, 5, 6, np.nan])}\n )\n actual = ds.where((ds > 0) & (ds < 7), drop=True)\n assert_identical(expected, actual)\n\n # 2d\n ds = Dataset({\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])})\n expected = Dataset({\"a\": ((\"x\", \"y\"), [[np.nan, 1], [2, 3]])})\n actual = ds.where(ds > 0, drop=True)\n assert_identical(expected, actual)\n\n # 2d with odd coordinates\n ds = Dataset(\n {\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])},\n coords={\n \"x\": [4, 3],\n \"y\": [1, 2],\n \"z\": ([\"x\", \"y\"], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]]),\n },\n )\n expected = Dataset(\n {\"a\": ((\"x\", \"y\"), [[3]])},\n coords={\"x\": [3], \"y\": [2], \"z\": ([\"x\", \"y\"], [[np.pi * 3]])},\n )\n actual = ds.where(ds > 2, drop=True)\n assert_identical(expected, actual)\n\n # 2d multiple variables\n ds = Dataset(\n {\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]]), \"b\": ((\"x\", \"y\"), [[4, 5], [6, 7]])}\n )\n expected = Dataset(\n {\n \"a\": ((\"x\", \"y\"), [[np.nan, 1], [2, 3]]),\n \"b\": ((\"x\", \"y\"), [[4, 5], [6, 7]]),\n }\n )\n actual = ds.where(ds > 0, drop=True)\n assert_identical(expected, actual)\n\n def test_where_drop_empty(self):\n # regression test for GH1341\n array = DataArray(np.random.rand(100, 10), dims=[\"nCells\", \"nVertLevels\"])\n mask = DataArray(np.zeros((100,), dtype=\"bool\"), dims=\"nCells\")\n actual = array.where(mask, drop=True)\n expected = DataArray(np.zeros((0, 10)), dims=[\"nCells\", \"nVertLevels\"])\n assert_identical(expected, actual)\n\n def test_where_drop_no_indexes(self):\n ds = Dataset({\"foo\": (\"x\", [0.0, 1.0])})\n expected = Dataset({\"foo\": (\"x\", [1.0])})\n actual = ds.where(ds == 1, drop=True)\n assert_identical(expected, actual)\n\n def test_reduce(self):\n data = create_test_data()\n\n assert len(data.mean().coords) == 0\n\n actual = data.max()\n expected = Dataset({k: v.max() for k, v in 
data.data_vars.items()})\n assert_equal(expected, actual)\n\n assert_equal(data.min(dim=[\"dim1\"]), data.min(dim=\"dim1\"))\n\n for reduct, expected in [\n (\"dim2\", [\"dim1\", \"dim3\", \"time\"]),\n ([\"dim2\", \"time\"], [\"dim1\", \"dim3\"]),\n ((\"dim2\", \"time\"), [\"dim1\", \"dim3\"]),\n ((), [\"dim1\", \"dim2\", \"dim3\", \"time\"]),\n ]:\n actual = list(data.min(dim=reduct).dims)\n assert actual == expected\n\n assert_equal(data.mean(dim=[]), data)\n\n def test_reduce_coords(self):\n # regression test for GH1470\n data = xr.Dataset({\"a\": (\"x\", [1, 2, 3])}, coords={\"b\": 4})\n expected = xr.Dataset({\"a\": 2}, coords={\"b\": 4})\n actual = data.mean(\"x\")\n assert_identical(actual, expected)\n\n # should be consistent\n actual = data[\"a\"].mean(\"x\").to_dataset()\n assert_identical(actual, expected)\n\n def test_mean_uint_dtype(self):\n data = xr.Dataset(\n {\n \"a\": ((\"x\", \"y\"), np.arange(6).reshape(3, 2).astype(\"uint\")),\n \"b\": ((\"x\",), np.array([0.1, 0.2, np.nan])),\n }\n )\n actual = data.mean(\"x\", skipna=True)\n expected = xr.Dataset(\n {\"a\": data[\"a\"].mean(\"x\"), \"b\": data[\"b\"].mean(\"x\", skipna=True)}\n )\n assert_identical(actual, expected)\n\n def test_reduce_bad_dim(self):\n data = create_test_data()\n with raises_regex(ValueError, \"Dataset does not contain\"):\n data.mean(dim=\"bad_dim\")\n\n def test_reduce_cumsum(self):\n data = xr.Dataset(\n {\"a\": 1, \"b\": (\"x\", [1, 2]), \"c\": ((\"x\", \"y\"), [[np.nan, 3], [0, 4]])}\n )\n assert_identical(data.fillna(0), data.cumsum(\"y\"))\n\n expected = xr.Dataset(\n {\"a\": 1, \"b\": (\"x\", [1, 3]), \"c\": ((\"x\", \"y\"), [[0, 3], [0, 7]])}\n )\n assert_identical(expected, data.cumsum())\n\n def test_reduce_cumsum_test_dims(self):\n data = create_test_data()\n for cumfunc in [\"cumsum\", \"cumprod\"]:\n with raises_regex(ValueError, \"Dataset does not contain\"):\n getattr(data, cumfunc)(dim=\"bad_dim\")\n\n # ensure dimensions are correct\n for reduct, expected in [\n (\"dim1\", [\"dim1\", \"dim2\", \"dim3\", \"time\"]),\n (\"dim2\", [\"dim1\", \"dim2\", \"dim3\", \"time\"]),\n (\"dim3\", [\"dim1\", \"dim2\", \"dim3\", \"time\"]),\n (\"time\", [\"dim1\", \"dim2\", \"dim3\"]),\n ]:\n actual = getattr(data, cumfunc)(dim=reduct).dims\n assert list(actual) == expected\n\n def test_reduce_non_numeric(self):\n data1 = create_test_data(seed=44)\n data2 = create_test_data(seed=44)\n add_vars = {\"var4\": [\"dim1\", \"dim2\"]}\n for v, dims in sorted(add_vars.items()):\n size = tuple(data1.dims[d] for d in dims)\n data = np.random.randint(0, 100, size=size).astype(np.str_)\n data1[v] = (dims, data, {\"foo\": \"variable\"})\n\n assert \"var4\" not in data1.mean()\n assert_equal(data1.mean(), data2.mean())\n assert_equal(data1.mean(dim=\"dim1\"), data2.mean(dim=\"dim1\"))\n\n @pytest.mark.filterwarnings(\n \"ignore:Once the behaviour of DataArray:DeprecationWarning\"\n )\n def test_reduce_strings(self):\n expected = Dataset({\"x\": \"a\"})\n ds = Dataset({\"x\": (\"y\", [\"a\", \"b\"])})\n ds.coords[\"y\"] = [-10, 10]\n actual = ds.min()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": \"b\"})\n actual = ds.max()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 0})\n actual = ds.argmin()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 1})\n actual = ds.argmax()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": -10})\n actual = ds.idxmin()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 10})\n actual = 
ds.idxmax()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": b\"a\"})\n ds = Dataset({\"x\": (\"y\", np.array([\"a\", \"b\"], \"S1\"))})\n actual = ds.min()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": \"a\"})\n ds = Dataset({\"x\": (\"y\", np.array([\"a\", \"b\"], \"U1\"))})\n actual = ds.min()\n assert_identical(expected, actual)\n\n def test_reduce_dtypes(self):\n # regression test for GH342\n expected = Dataset({\"x\": 1})\n actual = Dataset({\"x\": True}).sum()\n assert_identical(expected, actual)\n\n # regression test for GH505\n expected = Dataset({\"x\": 3})\n actual = Dataset({\"x\": (\"y\", np.array([1, 2], \"uint16\"))}).sum()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 1 + 1j})\n actual = Dataset({\"x\": (\"y\", [1, 1j])}).sum()\n assert_identical(expected, actual)\n\n def test_reduce_keep_attrs(self):\n data = create_test_data()\n _attrs = {\"attr1\": \"value1\", \"attr2\": 2929}\n\n attrs = dict(_attrs)\n data.attrs = attrs\n\n # Test dropped attrs\n ds = data.mean()\n assert ds.attrs == {}\n for v in ds.data_vars.values():\n assert v.attrs == {}\n\n # Test kept attrs\n ds = data.mean(keep_attrs=True)\n assert ds.attrs == attrs\n for k, v in ds.data_vars.items():\n assert v.attrs == data[k].attrs\n\n @pytest.mark.filterwarnings(\n \"ignore:Once the behaviour of DataArray:DeprecationWarning\"\n )\n def test_reduce_argmin(self):\n # regression test for #205\n ds = Dataset({\"a\": (\"x\", [0, 1])})\n expected = Dataset({\"a\": ([], 0)})\n actual = ds.argmin()\n assert_identical(expected, actual)\n\n actual = ds.argmin(\"x\")\n assert_identical(expected, actual)\n\n def test_reduce_scalars(self):\n ds = Dataset({\"x\": (\"a\", [2, 2]), \"y\": 2, \"z\": (\"b\", [2])})\n expected = Dataset({\"x\": 0, \"y\": 0, \"z\": 0})\n actual = ds.var()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 0, \"y\": 0, \"z\": (\"b\", [0])})\n actual = ds.var(\"a\")\n assert_identical(expected, actual)\n\n def test_reduce_only_one_axis(self):\n def mean_only_one_axis(x, axis):\n if not isinstance(axis, integer_types):\n raise TypeError(\"non-integer axis\")\n return x.mean(axis)\n\n ds = Dataset({\"a\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]])})\n expected = Dataset({\"a\": (\"x\", [2])})\n actual = ds.reduce(mean_only_one_axis, \"y\")\n assert_identical(expected, actual)\n\n with raises_regex(TypeError, \"missing 1 required positional argument: 'axis'\"):\n ds.reduce(mean_only_one_axis)\n\n with raises_regex(TypeError, \"non-integer axis\"):\n ds.reduce(mean_only_one_axis, axis=[\"x\", \"y\"])\n\n def test_reduce_no_axis(self):\n def total_sum(x):\n return np.sum(x.flatten())\n\n ds = Dataset({\"a\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]])})\n expected = Dataset({\"a\": ((), 10)})\n actual = ds.reduce(total_sum)\n assert_identical(expected, actual)\n\n with raises_regex(TypeError, \"unexpected keyword argument 'axis'\"):\n ds.reduce(total_sum, axis=0)\n\n with raises_regex(TypeError, \"unexpected keyword argument 'axis'\"):\n ds.reduce(total_sum, dim=\"x\")\n\n def test_reduce_keepdims(self):\n ds = Dataset(\n {\"a\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]])},\n coords={\n \"y\": [0, 1, 2, 3, 4],\n \"x\": [0],\n \"lat\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]]),\n \"c\": -999.0,\n },\n )\n\n # Shape should match behaviour of numpy reductions with keepdims=True\n # Coordinates involved in the reduction should be removed\n actual = ds.mean(keepdims=True)\n expected = Dataset(\n {\"a\": ([\"x\", \"y\"], np.mean(ds.a, keepdims=True))}, 
coords={\"c\": ds.c}\n )\n assert_identical(expected, actual)\n\n actual = ds.mean(\"x\", keepdims=True)\n expected = Dataset(\n {\"a\": ([\"x\", \"y\"], np.mean(ds.a, axis=0, keepdims=True))},\n coords={\"y\": ds.y, \"c\": ds.c},\n )\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"skipna\", [True, False])\n @pytest.mark.parametrize(\"q\", [0.25, [0.50], [0.25, 0.75]])\n def test_quantile(self, q, skipna):\n ds = create_test_data(seed=123)\n\n for dim in [None, \"dim1\", [\"dim1\"]]:\n ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)\n if is_scalar(q):\n assert \"quantile\" not in ds_quantile.dims\n else:\n assert \"quantile\" in ds_quantile.dims\n\n for var, dar in ds.data_vars.items():\n assert var in ds_quantile\n assert_identical(\n ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna)\n )\n dim = [\"dim1\", \"dim2\"]\n ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)\n assert \"dim3\" in ds_quantile.dims\n assert all(d not in ds_quantile.dims for d in dim)\n\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_quantile_skipna(self, skipna):\n q = 0.1\n dim = \"time\"\n ds = Dataset({\"a\": ([dim], np.arange(0, 11))})\n ds = ds.where(ds >= 1)\n\n result = ds.quantile(q=q, dim=dim, skipna=skipna)\n\n value = 1.9 if skipna else np.nan\n expected = Dataset({\"a\": value}, coords={\"quantile\": q})\n assert_identical(result, expected)\n\n @requires_bottleneck\n def test_rank(self):\n ds = create_test_data(seed=1234)\n # only ds.var3 depends on dim3\n z = ds.rank(\"dim3\")\n assert [\"var3\"] == list(z.data_vars)\n # same as dataarray version\n x = z.var3\n y = ds.var3.rank(\"dim3\")\n assert_equal(x, y)\n # coordinates stick\n assert list(z.coords) == list(ds.coords)\n assert list(x.coords) == list(y.coords)\n # invalid dim\n with raises_regex(ValueError, \"does not contain\"):\n x.rank(\"invalid_dim\")\n\n def test_count(self):\n ds = Dataset({\"x\": (\"a\", [np.nan, 1]), \"y\": 0, \"z\": np.nan})\n expected = Dataset({\"x\": 1, \"y\": 1, \"z\": 0})\n actual = ds.count()\n assert_identical(expected, actual)\n\n def test_map(self):\n data = create_test_data()\n data.attrs[\"foo\"] = \"bar\"\n\n assert_identical(data.map(np.mean), data.mean())\n\n expected = data.mean(keep_attrs=True)\n actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True)\n assert_identical(expected, actual)\n\n assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars(\"time\"))\n\n def scale(x, multiple=1):\n return multiple * x\n\n actual = data.map(scale, multiple=2)\n assert_equal(actual[\"var1\"], 2 * data[\"var1\"])\n assert_identical(actual[\"numbers\"], data[\"numbers\"])\n\n actual = data.map(np.asarray)\n expected = data.drop_vars(\"time\") # time is not used on a data var\n assert_equal(expected, actual)\n\n def test_apply_pending_deprecated_map(self):\n data = create_test_data()\n data.attrs[\"foo\"] = \"bar\"\n\n with pytest.warns(PendingDeprecationWarning):\n assert_identical(data.apply(np.mean), data.mean())\n\n def make_example_math_dataset(self):\n variables = {\n \"bar\": (\"x\", np.arange(100, 400, 100)),\n \"foo\": ((\"x\", \"y\"), 1.0 * np.arange(12).reshape(3, 4)),\n }\n coords = {\"abc\": (\"x\", [\"a\", \"b\", \"c\"]), \"y\": 10 * np.arange(4)}\n ds = Dataset(variables, coords)\n ds[\"foo\"][0, 0] = np.nan\n return ds\n\n def test_dataset_number_math(self):\n ds = self.make_example_math_dataset()\n\n assert_identical(ds, +ds)\n assert_identical(ds, ds + 0)\n assert_identical(ds, 0 + ds)\n assert_identical(ds, ds + 
np.array(0))\n assert_identical(ds, np.array(0) + ds)\n\n actual = ds.copy(deep=True)\n actual += 0\n assert_identical(ds, actual)\n\n def test_unary_ops(self):\n ds = self.make_example_math_dataset()\n\n assert_identical(ds.map(abs), abs(ds))\n assert_identical(ds.map(lambda x: x + 4), ds + 4)\n\n for func in [\n lambda x: x.isnull(),\n lambda x: x.round(),\n lambda x: x.astype(int),\n ]:\n assert_identical(ds.map(func), func(ds))\n\n assert_identical(ds.isnull(), ~ds.notnull())\n\n # don't actually patch these methods in\n with pytest.raises(AttributeError):\n ds.item\n with pytest.raises(AttributeError):\n ds.searchsorted\n\n def test_dataset_array_math(self):\n ds = self.make_example_math_dataset()\n\n expected = ds.map(lambda x: x - ds[\"foo\"])\n assert_identical(expected, ds - ds[\"foo\"])\n assert_identical(expected, -ds[\"foo\"] + ds)\n assert_identical(expected, ds - ds[\"foo\"].variable)\n assert_identical(expected, -ds[\"foo\"].variable + ds)\n actual = ds.copy(deep=True)\n actual -= ds[\"foo\"]\n assert_identical(expected, actual)\n\n expected = ds.map(lambda x: x + ds[\"bar\"])\n assert_identical(expected, ds + ds[\"bar\"])\n actual = ds.copy(deep=True)\n actual += ds[\"bar\"]\n assert_identical(expected, actual)\n\n expected = Dataset({\"bar\": ds[\"bar\"] + np.arange(3)})\n assert_identical(expected, ds[[\"bar\"]] + np.arange(3))\n assert_identical(expected, np.arange(3) + ds[[\"bar\"]])\n\n def test_dataset_dataset_math(self):\n ds = self.make_example_math_dataset()\n\n assert_identical(ds, ds + 0 * ds)\n assert_identical(ds, ds + {\"foo\": 0, \"bar\": 0})\n\n expected = ds.map(lambda x: 2 * x)\n assert_identical(expected, 2 * ds)\n assert_identical(expected, ds + ds)\n assert_identical(expected, ds + ds.data_vars)\n assert_identical(expected, ds + dict(ds.data_vars))\n\n actual = ds.copy(deep=True)\n expected_id = id(actual)\n actual += ds\n assert_identical(expected, actual)\n assert expected_id == id(actual)\n\n assert_identical(ds == ds, ds.notnull())\n\n subsampled = ds.isel(y=slice(2))\n expected = 2 * subsampled\n assert_identical(expected, subsampled + ds)\n assert_identical(expected, ds + subsampled)\n\n def test_dataset_math_auto_align(self):\n ds = self.make_example_math_dataset()\n subset = ds.isel(y=[1, 3])\n expected = 2 * subset\n actual = ds + subset\n assert_identical(expected, actual)\n\n actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None))\n expected = 2 * ds.drop_sel(y=ds.y)\n assert_equal(actual, expected)\n\n actual = ds + ds[[\"bar\"]]\n expected = (2 * ds[[\"bar\"]]).merge(ds.coords)\n assert_identical(expected, actual)\n\n assert_identical(ds + Dataset(), ds.coords.to_dataset())\n assert_identical(Dataset() + Dataset(), Dataset())\n\n ds2 = Dataset(coords={\"bar\": 42})\n assert_identical(ds + ds2, ds.coords.merge(ds2))\n\n # maybe unary arithmetic with empty datasets should raise instead?\n assert_identical(Dataset() + 1, Dataset())\n\n actual = ds.copy(deep=True)\n other = ds.isel(y=slice(2))\n actual += other\n expected = ds + other.reindex_like(ds)\n assert_identical(expected, actual)\n\n def test_dataset_math_errors(self):\n ds = self.make_example_math_dataset()\n\n with pytest.raises(TypeError):\n ds[\"foo\"] += ds\n with pytest.raises(TypeError):\n ds[\"foo\"].variable += ds\n with raises_regex(ValueError, \"must have the same\"):\n ds += ds[[\"bar\"]]\n\n # verify we can rollback in-place operations if something goes wrong\n # nb. 
inplace datetime64 math actually will work with an integer array\n # but not floats thanks to numpy's inconsistent handling\n other = DataArray(np.datetime64(\"2000-01-01\"), coords={\"c\": 2})\n actual = ds.copy(deep=True)\n with pytest.raises(TypeError):\n actual += other\n assert_identical(actual, ds)\n\n def test_dataset_transpose(self):\n ds = Dataset(\n {\n \"a\": ((\"x\", \"y\"), np.random.randn(3, 4)),\n \"b\": ((\"y\", \"x\"), np.random.randn(4, 3)),\n },\n coords={\n \"x\": range(3),\n \"y\": range(4),\n \"xy\": ((\"x\", \"y\"), np.random.randn(3, 4)),\n },\n )\n\n actual = ds.transpose()\n expected = Dataset(\n {\"a\": ((\"y\", \"x\"), ds.a.values.T), \"b\": ((\"x\", \"y\"), ds.b.values.T)},\n coords={\n \"x\": ds.x.values,\n \"y\": ds.y.values,\n \"xy\": ((\"y\", \"x\"), ds.xy.values.T),\n },\n )\n assert_identical(expected, actual)\n\n actual = ds.transpose(...)\n expected = ds\n assert_identical(expected, actual)\n\n actual = ds.transpose(\"x\", \"y\")\n expected = ds.map(lambda x: x.transpose(\"x\", \"y\", transpose_coords=True))\n assert_identical(expected, actual)\n\n ds = create_test_data()\n actual = ds.transpose()\n for k in ds.variables:\n assert actual[k].dims[::-1] == ds[k].dims\n\n new_order = (\"dim2\", \"dim3\", \"dim1\", \"time\")\n actual = ds.transpose(*new_order)\n for k in ds.variables:\n expected_dims = tuple(d for d in new_order if d in ds[k].dims)\n assert actual[k].dims == expected_dims\n\n # same as above but with ellipsis\n new_order = (\"dim2\", \"dim3\", \"dim1\", \"time\")\n actual = ds.transpose(\"dim2\", \"dim3\", ...)\n for k in ds.variables:\n expected_dims = tuple(d for d in new_order if d in ds[k].dims)\n assert actual[k].dims == expected_dims\n\n with raises_regex(ValueError, \"permuted\"):\n ds.transpose(\"dim1\", \"dim2\", \"dim3\")\n with raises_regex(ValueError, \"permuted\"):\n ds.transpose(\"dim1\", \"dim2\", \"dim3\", \"time\", \"extra_dim\")\n\n assert \"T\" not in dir(ds)\n\n def test_dataset_ellipsis_transpose_different_ordered_vars(self):\n # https://github.com/pydata/xarray/issues/1081#issuecomment-544350457\n ds = Dataset(\n dict(\n a=((\"w\", \"x\", \"y\", \"z\"), np.ones((2, 3, 4, 5))),\n b=((\"x\", \"w\", \"y\", \"z\"), np.zeros((3, 2, 4, 5))),\n )\n )\n result = ds.transpose(..., \"z\", \"y\")\n assert list(result[\"a\"].dims) == list(\"wxzy\")\n assert list(result[\"b\"].dims) == list(\"xwzy\")\n\n def test_dataset_retains_period_index_on_transpose(self):\n\n ds = create_test_data()\n ds[\"time\"] = pd.period_range(\"2000-01-01\", periods=20)\n\n transposed = ds.transpose()\n\n assert isinstance(transposed.time.to_index(), pd.PeriodIndex)\n\n def test_dataset_diff_n1_simple(self):\n ds = Dataset({\"foo\": (\"x\", [5, 5, 6, 6])})\n actual = ds.diff(\"x\")\n expected = Dataset({\"foo\": (\"x\", [0, 1, 0])})\n assert_equal(expected, actual)\n\n def test_dataset_diff_n1_label(self):\n ds = Dataset({\"foo\": (\"x\", [5, 5, 6, 6])}, {\"x\": [0, 1, 2, 3]})\n actual = ds.diff(\"x\", label=\"lower\")\n expected = Dataset({\"foo\": (\"x\", [0, 1, 0])}, {\"x\": [0, 1, 2]})\n assert_equal(expected, actual)\n\n actual = ds.diff(\"x\", label=\"upper\")\n expected = Dataset({\"foo\": (\"x\", [0, 1, 0])}, {\"x\": [1, 2, 3]})\n assert_equal(expected, actual)\n\n def test_dataset_diff_n1(self):\n ds = create_test_data(seed=1)\n actual = ds.diff(\"dim2\")\n expected = {}\n expected[\"var1\"] = DataArray(\n np.diff(ds[\"var1\"].values, axis=1),\n {\"dim2\": ds[\"dim2\"].values[1:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var2\"] = DataArray(\n 
np.diff(ds[\"var2\"].values, axis=1),\n {\"dim2\": ds[\"dim2\"].values[1:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var3\"] = ds[\"var3\"]\n expected = Dataset(expected, coords={\"time\": ds[\"time\"].values})\n expected.coords[\"numbers\"] = (\"dim3\", ds[\"numbers\"].values)\n assert_equal(expected, actual)\n\n def test_dataset_diff_n2(self):\n ds = create_test_data(seed=1)\n actual = ds.diff(\"dim2\", n=2)\n expected = {}\n expected[\"var1\"] = DataArray(\n np.diff(ds[\"var1\"].values, axis=1, n=2),\n {\"dim2\": ds[\"dim2\"].values[2:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var2\"] = DataArray(\n np.diff(ds[\"var2\"].values, axis=1, n=2),\n {\"dim2\": ds[\"dim2\"].values[2:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var3\"] = ds[\"var3\"]\n expected = Dataset(expected, coords={\"time\": ds[\"time\"].values})\n expected.coords[\"numbers\"] = (\"dim3\", ds[\"numbers\"].values)\n assert_equal(expected, actual)\n\n def test_dataset_diff_exception_n_neg(self):\n ds = create_test_data(seed=1)\n with raises_regex(ValueError, \"must be non-negative\"):\n ds.diff(\"dim2\", n=-1)\n\n def test_dataset_diff_exception_label_str(self):\n ds = create_test_data(seed=1)\n with raises_regex(ValueError, \"'label' argument has to\"):\n ds.diff(\"dim2\", label=\"raise_me\")\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"foo\": -10}])\n def test_shift(self, fill_value):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n actual = ds.shift(x=1, fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value = np.nan\n elif isinstance(fill_value, dict):\n fill_value = fill_value.get(\"foo\", np.nan)\n expected = Dataset({\"foo\": (\"x\", [fill_value, 1, 2])}, coords, attrs)\n assert_identical(expected, actual)\n\n with raises_regex(ValueError, \"dimensions\"):\n ds.shift(foo=123)\n\n def test_roll_coords(self):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n actual = ds.roll(x=1, roll_coords=True)\n\n ex_coords = {\"bar\": (\"x\", list(\"cab\")), \"x\": [2, -4, 3]}\n expected = Dataset({\"foo\": (\"x\", [3, 1, 2])}, ex_coords, attrs)\n assert_identical(expected, actual)\n\n with raises_regex(ValueError, \"dimensions\"):\n ds.roll(foo=123, roll_coords=True)\n\n def test_roll_no_coords(self):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n actual = ds.roll(x=1, roll_coords=False)\n\n expected = Dataset({\"foo\": (\"x\", [3, 1, 2])}, coords, attrs)\n assert_identical(expected, actual)\n\n with raises_regex(ValueError, \"dimensions\"):\n ds.roll(abc=321, roll_coords=False)\n\n def test_roll_coords_none(self):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n\n with pytest.warns(FutureWarning):\n actual = ds.roll(x=1, roll_coords=None)\n\n ex_coords = {\"bar\": (\"x\", list(\"cab\")), \"x\": [2, -4, 3]}\n expected = Dataset({\"foo\": (\"x\", [3, 1, 2])}, ex_coords, attrs)\n assert_identical(expected, actual)\n\n def test_roll_multidim(self):\n # regression test for 2445\n arr = xr.DataArray(\n [[1, 2, 3], [4, 5, 6]],\n coords={\"x\": range(3), \"y\": range(2)},\n dims=(\"y\", 
\"x\"),\n )\n actual = arr.roll(x=1, roll_coords=True)\n expected = xr.DataArray(\n [[3, 1, 2], [6, 4, 5]], coords=[(\"y\", [0, 1]), (\"x\", [2, 0, 1])]\n )\n assert_identical(expected, actual)\n\n def test_real_and_imag(self):\n attrs = {\"foo\": \"bar\"}\n ds = Dataset({\"x\": ((), 1 + 2j, attrs)}, attrs=attrs)\n\n expected_re = Dataset({\"x\": ((), 1, attrs)}, attrs=attrs)\n assert_identical(ds.real, expected_re)\n\n expected_im = Dataset({\"x\": ((), 2, attrs)}, attrs=attrs)\n assert_identical(ds.imag, expected_im)\n\n def test_setattr_raises(self):\n ds = Dataset({}, coords={\"scalar\": 1}, attrs={\"foo\": \"bar\"})\n with raises_regex(AttributeError, \"cannot set attr\"):\n ds.scalar = 2\n with raises_regex(AttributeError, \"cannot set attr\"):\n ds.foo = 2\n with raises_regex(AttributeError, \"cannot set attr\"):\n ds.other = 2\n\n def test_filter_by_attrs(self):\n precip = dict(standard_name=\"convective_precipitation_flux\")\n temp0 = dict(standard_name=\"air_potential_temperature\", height=\"0 m\")\n temp10 = dict(standard_name=\"air_potential_temperature\", height=\"10 m\")\n ds = Dataset(\n {\n \"temperature_0\": ([\"t\"], [0], temp0),\n \"temperature_10\": ([\"t\"], [0], temp10),\n \"precipitation\": ([\"t\"], [0], precip),\n },\n coords={\"time\": ([\"t\"], [0], dict(axis=\"T\", long_name=\"time_in_seconds\"))},\n )\n\n # Test return empty Dataset.\n ds.filter_by_attrs(standard_name=\"invalid_standard_name\")\n new_ds = ds.filter_by_attrs(standard_name=\"invalid_standard_name\")\n assert not bool(new_ds.data_vars)\n\n # Test return one DataArray.\n new_ds = ds.filter_by_attrs(standard_name=\"convective_precipitation_flux\")\n assert new_ds[\"precipitation\"].standard_name == \"convective_precipitation_flux\"\n\n assert_equal(new_ds[\"precipitation\"], ds[\"precipitation\"])\n\n # Test filter coordinates\n new_ds = ds.filter_by_attrs(long_name=\"time_in_seconds\")\n assert new_ds[\"time\"].long_name == \"time_in_seconds\"\n assert not bool(new_ds.data_vars)\n\n # Test return more than one DataArray.\n new_ds = ds.filter_by_attrs(standard_name=\"air_potential_temperature\")\n assert len(new_ds.data_vars) == 2\n for var in new_ds.data_vars:\n assert new_ds[var].standard_name == \"air_potential_temperature\"\n\n # Test callable.\n new_ds = ds.filter_by_attrs(height=lambda v: v is not None)\n assert len(new_ds.data_vars) == 2\n for var in new_ds.data_vars:\n assert new_ds[var].standard_name == \"air_potential_temperature\"\n\n new_ds = ds.filter_by_attrs(height=\"10 m\")\n assert len(new_ds.data_vars) == 1\n for var in new_ds.data_vars:\n assert new_ds[var].height == \"10 m\"\n\n # Test return empty Dataset due to conflicting filters\n new_ds = ds.filter_by_attrs(\n standard_name=\"convective_precipitation_flux\", height=\"0 m\"\n )\n assert not bool(new_ds.data_vars)\n\n # Test return one DataArray with two filter conditions\n new_ds = ds.filter_by_attrs(\n standard_name=\"air_potential_temperature\", height=\"0 m\"\n )\n for var in new_ds.data_vars:\n assert new_ds[var].standard_name == \"air_potential_temperature\"\n assert new_ds[var].height == \"0 m\"\n assert new_ds[var].height != \"10 m\"\n\n # Test return empty Dataset due to conflicting callables\n new_ds = ds.filter_by_attrs(\n standard_name=lambda v: False, height=lambda v: True\n )\n assert not bool(new_ds.data_vars)\n\n def test_binary_op_propagate_indexes(self):\n ds = Dataset(\n {\"d1\": DataArray([1, 2, 3], dims=[\"x\"], coords={\"x\": [10, 20, 30]})}\n )\n expected = ds.indexes[\"x\"]\n actual = (ds * 
2).indexes[\"x\"]\n assert expected is actual\n\n def test_binary_op_join_setting(self):\n # arithmetic_join applies to data array coordinates\n missing_2 = xr.Dataset({\"x\": [0, 1]})\n missing_0 = xr.Dataset({\"x\": [1, 2]})\n with xr.set_options(arithmetic_join=\"outer\"):\n actual = missing_2 + missing_0\n expected = xr.Dataset({\"x\": [0, 1, 2]})\n assert_equal(actual, expected)\n\n # arithmetic join also applies to data_vars\n ds1 = xr.Dataset({\"foo\": 1, \"bar\": 2})\n ds2 = xr.Dataset({\"bar\": 2, \"baz\": 3})\n expected = xr.Dataset({\"bar\": 4}) # default is inner joining\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n with xr.set_options(arithmetic_join=\"outer\"):\n expected = xr.Dataset({\"foo\": np.nan, \"bar\": 4, \"baz\": np.nan})\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n with xr.set_options(arithmetic_join=\"left\"):\n expected = xr.Dataset({\"foo\": np.nan, \"bar\": 4})\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n with xr.set_options(arithmetic_join=\"right\"):\n expected = xr.Dataset({\"bar\": 4, \"baz\": np.nan})\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n def test_full_like(self):\n # For more thorough tests, see test_variable.py\n # Note: testing data_vars with mismatched dtypes\n ds = Dataset(\n {\n \"d1\": DataArray([1, 2, 3], dims=[\"x\"], coords={\"x\": [10, 20, 30]}),\n \"d2\": DataArray([1.1, 2.2, 3.3], dims=[\"y\"]),\n },\n attrs={\"foo\": \"bar\"},\n )\n actual = full_like(ds, 2)\n\n expected = ds.copy(deep=True)\n expected[\"d1\"].values = [2, 2, 2]\n expected[\"d2\"].values = [2.0, 2.0, 2.0]\n assert expected[\"d1\"].dtype == int\n assert expected[\"d2\"].dtype == float\n assert_identical(expected, actual)\n\n # override dtype\n actual = full_like(ds, fill_value=True, dtype=bool)\n expected = ds.copy(deep=True)\n expected[\"d1\"].values = [True, True, True]\n expected[\"d2\"].values = [True, True, True]\n assert expected[\"d1\"].dtype == bool\n assert expected[\"d2\"].dtype == bool\n assert_identical(expected, actual)\n\n # with multiple fill values\n actual = full_like(ds, {\"d1\": 1, \"d2\": 2.3})\n expected = ds.assign(d1=(\"x\", [1, 1, 1]), d2=(\"y\", [2.3, 2.3, 2.3]))\n assert expected[\"d1\"].dtype == int\n assert expected[\"d2\"].dtype == float\n assert_identical(expected, actual)\n\n # override multiple dtypes\n actual = full_like(ds, fill_value={\"d1\": 1, \"d2\": 2.3}, dtype={\"d1\": bool})\n expected = ds.assign(d1=(\"x\", [True, True, True]), d2=(\"y\", [2.3, 2.3, 2.3]))\n assert expected[\"d1\"].dtype == bool\n assert expected[\"d2\"].dtype == float\n assert_identical(expected, actual)\n\n def test_combine_first(self):\n dsx0 = DataArray([0, 0], [(\"x\", [\"a\", \"b\"])]).to_dataset(name=\"dsx0\")\n dsx1 = DataArray([1, 1], [(\"x\", [\"b\", \"c\"])]).to_dataset(name=\"dsx1\")\n\n actual = dsx0.combine_first(dsx1)\n expected = Dataset(\n {\"dsx0\": (\"x\", [0, 0, np.nan]), \"dsx1\": (\"x\", [np.nan, 1, 1])},\n coords={\"x\": [\"a\", \"b\", \"c\"]},\n )\n assert_equal(actual, expected)\n assert_equal(actual, xr.merge([dsx0, dsx1]))\n\n # works just like xr.merge([self, other])\n dsy2 = DataArray([2, 2, 2], [(\"x\", [\"b\", \"c\", \"d\"])]).to_dataset(name=\"dsy2\")\n actual = dsx0.combine_first(dsy2)\n expected = xr.merge([dsy2, dsx0])\n assert_equal(actual, expected)\n\n def test_sortby(self):\n ds = Dataset(\n {\n \"A\": DataArray(\n [[1, 2], [3, 4], [5, 6]], [(\"x\", [\"c\", \"b\", \"a\"]), (\"y\", [1, 0])]\n ),\n \"B\": DataArray([[5, 6], [7, 8], [9, 10]], dims=[\"x\", \"y\"]),\n }\n )\n\n 
sorted1d = Dataset(\n {\n \"A\": DataArray(\n [[5, 6], [3, 4], [1, 2]], [(\"x\", [\"a\", \"b\", \"c\"]), (\"y\", [1, 0])]\n ),\n \"B\": DataArray([[9, 10], [7, 8], [5, 6]], dims=[\"x\", \"y\"]),\n }\n )\n\n sorted2d = Dataset(\n {\n \"A\": DataArray(\n [[6, 5], [4, 3], [2, 1]], [(\"x\", [\"a\", \"b\", \"c\"]), (\"y\", [0, 1])]\n ),\n \"B\": DataArray([[10, 9], [8, 7], [6, 5]], dims=[\"x\", \"y\"]),\n }\n )\n\n expected = sorted1d\n dax = DataArray([100, 99, 98], [(\"x\", [\"c\", \"b\", \"a\"])])\n actual = ds.sortby(dax)\n assert_equal(actual, expected)\n\n # test descending order sort\n actual = ds.sortby(dax, ascending=False)\n assert_equal(actual, ds)\n\n # test alignment (fills in nan for 'c')\n dax_short = DataArray([98, 97], [(\"x\", [\"b\", \"a\"])])\n actual = ds.sortby(dax_short)\n assert_equal(actual, expected)\n\n # test 1-D lexsort\n # dax0 is sorted first to give indices of [1, 2, 0]\n # and then dax1 would be used to move index 2 ahead of 1\n dax0 = DataArray([100, 95, 95], [(\"x\", [\"c\", \"b\", \"a\"])])\n dax1 = DataArray([0, 1, 0], [(\"x\", [\"c\", \"b\", \"a\"])])\n actual = ds.sortby([dax0, dax1]) # lexsort underneath gives [2, 1, 0]\n assert_equal(actual, expected)\n\n expected = sorted2d\n # test multi-dim sort by 1D dataarray values\n day = DataArray([90, 80], [(\"y\", [1, 0])])\n actual = ds.sortby([day, dax])\n assert_equal(actual, expected)\n\n # test exception-raising\n with pytest.raises(KeyError) as excinfo:\n actual = ds.sortby(\"z\")\n\n with pytest.raises(ValueError) as excinfo:\n actual = ds.sortby(ds[\"A\"])\n assert \"DataArray is not 1-D\" in str(excinfo.value)\n\n expected = sorted1d\n actual = ds.sortby(\"x\")\n assert_equal(actual, expected)\n\n # test pandas.MultiIndex\n indices = ((\"b\", 1), (\"b\", 0), (\"a\", 1), (\"a\", 0))\n midx = pd.MultiIndex.from_tuples(indices, names=[\"one\", \"two\"])\n ds_midx = Dataset(\n {\n \"A\": DataArray(\n [[1, 2], [3, 4], [5, 6], [7, 8]], [(\"x\", midx), (\"y\", [1, 0])]\n ),\n \"B\": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=[\"x\", \"y\"]),\n }\n )\n actual = ds_midx.sortby(\"x\")\n midx_reversed = pd.MultiIndex.from_tuples(\n tuple(reversed(indices)), names=[\"one\", \"two\"]\n )\n expected = Dataset(\n {\n \"A\": DataArray(\n [[7, 8], [5, 6], [3, 4], [1, 2]],\n [(\"x\", midx_reversed), (\"y\", [1, 0])],\n ),\n \"B\": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=[\"x\", \"y\"]),\n }\n )\n assert_equal(actual, expected)\n\n # multi-dim sort by coordinate objects\n expected = sorted2d\n actual = ds.sortby([\"x\", \"y\"])\n assert_equal(actual, expected)\n\n # test descending order sort\n actual = ds.sortby([\"x\", \"y\"], ascending=False)\n assert_equal(actual, ds)\n\n def test_attribute_access(self):\n ds = create_test_data(seed=1)\n for key in [\"var1\", \"var2\", \"var3\", \"time\", \"dim1\", \"dim2\", \"dim3\", \"numbers\"]:\n assert_equal(ds[key], getattr(ds, key))\n assert key in dir(ds)\n\n for key in [\"dim3\", \"dim1\", \"numbers\"]:\n assert_equal(ds[\"var3\"][key], getattr(ds.var3, key))\n assert key in dir(ds[\"var3\"])\n # attrs\n assert ds[\"var3\"].attrs[\"foo\"] == ds.var3.foo\n assert \"foo\" in dir(ds[\"var3\"])\n\n def test_ipython_key_completion(self):\n ds = create_test_data(seed=1)\n actual = ds._ipython_key_completions_()\n expected = [\"var1\", \"var2\", \"var3\", \"time\", \"dim1\", \"dim2\", \"dim3\", \"numbers\"]\n for item in actual:\n ds[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # for dataarray\n actual = 
ds[\"var3\"]._ipython_key_completions_()\n expected = [\"dim3\", \"dim1\", \"numbers\"]\n for item in actual:\n ds[\"var3\"][item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # MultiIndex\n ds_midx = ds.stack(dim12=[\"dim1\", \"dim2\"])\n actual = ds_midx._ipython_key_completions_()\n expected = [\n \"var1\",\n \"var2\",\n \"var3\",\n \"time\",\n \"dim1\",\n \"dim2\",\n \"dim3\",\n \"numbers\",\n \"dim12\",\n ]\n for item in actual:\n ds_midx[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # coords\n actual = ds.coords._ipython_key_completions_()\n expected = [\"time\", \"dim1\", \"dim2\", \"dim3\", \"numbers\"]\n for item in actual:\n ds.coords[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n actual = ds[\"var3\"].coords._ipython_key_completions_()\n expected = [\"dim1\", \"dim3\", \"numbers\"]\n for item in actual:\n ds[\"var3\"].coords[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # data_vars\n actual = ds.data_vars._ipython_key_completions_()\n expected = [\"var1\", \"var2\", \"var3\", \"dim1\"]\n for item in actual:\n ds.data_vars[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n def test_polyfit_output(self):\n ds = create_test_data(seed=1)\n\n out = ds.polyfit(\"dim2\", 2, full=False)\n assert \"var1_polyfit_coefficients\" in out\n\n out = ds.polyfit(\"dim1\", 2, full=True)\n assert \"var1_polyfit_coefficients\" in out\n assert \"dim1_matrix_rank\" in out\n\n out = ds.polyfit(\"time\", 2)\n assert len(out.data_vars) == 0\n\n def test_polyfit_warnings(self):\n ds = create_test_data(seed=1)\n\n with warnings.catch_warnings(record=True) as ws:\n ds.var1.polyfit(\"dim2\", 10, full=False)\n assert len(ws) == 1\n assert ws[0].category == np.RankWarning\n ds.var1.polyfit(\"dim2\", 10, full=True)\n assert len(ws) == 1\n\n def test_pad(self):\n ds = create_test_data(seed=1)\n padded = ds.pad(dim2=(1, 1), constant_values=42)\n\n assert padded[\"dim2\"].shape == (11,)\n assert padded[\"var1\"].shape == (8, 11)\n assert padded[\"var2\"].shape == (8, 11)\n assert padded[\"var3\"].shape == (10, 8)\n assert dict(padded.dims) == {\"dim1\": 8, \"dim2\": 11, \"dim3\": 10, \"time\": 20}\n\n np.testing.assert_equal(padded[\"var1\"].isel(dim2=[0, -1]).data, 42)\n np.testing.assert_equal(padded[\"dim2\"][[0, -1]].data, np.nan)\n\n def test_astype_attrs(self):\n data = create_test_data(seed=123)\n data.attrs[\"foo\"] = \"bar\"\n\n assert data.attrs == data.astype(float).attrs\n assert data.var1.attrs == data.astype(float).var1.attrs\n assert not data.astype(float, keep_attrs=False).attrs\n assert not data.astype(float, keep_attrs=False).var1.attrs\n\n\n# Py.test tests\n\n\[email protected](params=[None])\ndef data_set(request):\n return create_test_data(request.param)\n\n\[email protected](\"test_elements\", ([1, 2], np.array([1, 2]), DataArray([1, 2])))\ndef test_isin(test_elements):\n expected = Dataset(\n data_vars={\n \"var1\": ((\"dim1\",), [0, 1]),\n \"var2\": ((\"dim1\",), [1, 1]),\n \"var3\": ((\"dim1\",), [0, 1]),\n }\n ).astype(\"bool\")\n\n result = Dataset(\n data_vars={\n \"var1\": ((\"dim1\",), [0, 1]),\n \"var2\": ((\"dim1\",), [1, 2]),\n \"var3\": ((\"dim1\",), [0, 1]),\n }\n ).isin(test_elements)\n\n assert_equal(result, expected)\n\n\[email protected](not has_dask, reason=\"requires dask\")\[email protected](\"test_elements\", ([1, 2], np.array([1, 2]), DataArray([1, 2])))\ndef test_isin_dask(test_elements):\n expected = Dataset(\n data_vars={\n \"var1\": 
((\"dim1\",), [0, 1]),\n \"var2\": ((\"dim1\",), [1, 1]),\n \"var3\": ((\"dim1\",), [0, 1]),\n }\n ).astype(\"bool\")\n\n result = (\n Dataset(\n data_vars={\n \"var1\": ((\"dim1\",), [0, 1]),\n \"var2\": ((\"dim1\",), [1, 2]),\n \"var3\": ((\"dim1\",), [0, 1]),\n }\n )\n .chunk(1)\n .isin(test_elements)\n .compute()\n )\n\n assert_equal(result, expected)\n\n\ndef test_isin_dataset():\n ds = Dataset({\"x\": [1, 2]})\n with pytest.raises(TypeError):\n ds.isin(ds)\n\n\[email protected](\n \"unaligned_coords\",\n (\n {\"x\": [2, 1, 0]},\n {\"x\": ([\"x\"], np.asarray([2, 1, 0]))},\n {\"x\": ([\"x\"], np.asarray([1, 2, 0]))},\n {\"x\": pd.Index([2, 1, 0])},\n {\"x\": Variable(dims=\"x\", data=[0, 2, 1])},\n {\"x\": IndexVariable(dims=\"x\", data=[0, 1, 2])},\n {\"y\": 42},\n {\"y\": (\"x\", [2, 1, 0])},\n {\"y\": (\"x\", np.asarray([2, 1, 0]))},\n {\"y\": ([\"x\"], np.asarray([2, 1, 0]))},\n ),\n)\[email protected](\"coords\", ({\"x\": (\"x\", [0, 1, 2])}, {\"x\": [0, 1, 2]}))\ndef test_dataset_constructor_aligns_to_explicit_coords(unaligned_coords, coords):\n\n a = xr.DataArray([1, 2, 3], dims=[\"x\"], coords=unaligned_coords)\n\n expected = xr.Dataset(coords=coords)\n expected[\"a\"] = a\n\n result = xr.Dataset({\"a\": a}, coords=coords)\n\n assert_equal(expected, result)\n\n\ndef test_error_message_on_set_supplied():\n with pytest.raises(TypeError, match=\"has invalid type <class 'set'>\"):\n xr.Dataset(dict(date=[1, 2, 3], sec={4}))\n\n\[email protected](\"unaligned_coords\", ({\"y\": (\"b\", np.asarray([2, 1, 0]))},))\ndef test_constructor_raises_with_invalid_coords(unaligned_coords):\n\n with pytest.raises(ValueError, match=\"not a subset of the DataArray dimensions\"):\n xr.DataArray([1, 2, 3], dims=[\"x\"], coords=unaligned_coords)\n\n\ndef test_dir_expected_attrs(data_set):\n\n some_expected_attrs = {\"pipe\", \"mean\", \"isnull\", \"var1\", \"dim2\", \"numbers\"}\n result = dir(data_set)\n assert set(result) >= some_expected_attrs\n\n\ndef test_dir_non_string(data_set):\n # add a numbered key to ensure this doesn't break dir\n data_set[5] = \"foo\"\n result = dir(data_set)\n assert 5 not in result\n\n # GH2172\n sample_data = np.random.uniform(size=[2, 2000, 10000])\n x = xr.Dataset({\"sample_data\": (sample_data.shape, sample_data)})\n x2 = x[\"sample_data\"]\n dir(x2)\n\n\ndef test_dir_unicode(data_set):\n data_set[\"unicode\"] = \"uni\"\n result = dir(data_set)\n assert \"unicode\" in result\n\n\[email protected](params=[1])\ndef ds(request):\n if request.param == 1:\n return Dataset(\n {\n \"z1\": ([\"y\", \"x\"], np.random.randn(2, 8)),\n \"z2\": ([\"time\", \"y\"], np.random.randn(10, 2)),\n },\n {\n \"x\": (\"x\", np.linspace(0, 1.0, 8)),\n \"time\": (\"time\", np.linspace(0, 1.0, 10)),\n \"c\": (\"y\", [\"a\", \"b\"]),\n \"y\": range(2),\n },\n )\n\n if request.param == 2:\n return Dataset(\n {\n \"z1\": ([\"time\", \"y\"], np.random.randn(10, 2)),\n \"z2\": ([\"time\"], np.random.randn(10)),\n \"z3\": ([\"x\", \"time\"], np.random.randn(8, 10)),\n },\n {\n \"x\": (\"x\", np.linspace(0, 1.0, 8)),\n \"time\": (\"time\", np.linspace(0, 1.0, 10)),\n \"c\": (\"y\", [\"a\", \"b\"]),\n \"y\": range(2),\n },\n )\n\n\ndef test_coarsen_absent_dims_error(ds):\n with raises_regex(ValueError, \"not found in Dataset.\"):\n ds.coarsen(foo=2)\n\n\[email protected](\"dask\", [True, False])\[email protected]((\"boundary\", \"side\"), [(\"trim\", \"left\"), (\"pad\", \"right\")])\ndef test_coarsen(ds, dask, boundary, side):\n if dask and has_dask:\n ds = ds.chunk({\"x\": 4})\n\n actual = 
ds.coarsen(time=2, x=3, boundary=boundary, side=side).max()\n assert_equal(\n actual[\"z1\"], ds[\"z1\"].coarsen(x=3, boundary=boundary, side=side).max()\n )\n # coordinate should be mean by default\n assert_equal(\n actual[\"time\"], ds[\"time\"].coarsen(time=2, boundary=boundary, side=side).mean()\n )\n\n\[email protected](\"dask\", [True, False])\ndef test_coarsen_coords(ds, dask):\n if dask and has_dask:\n ds = ds.chunk({\"x\": 4})\n\n # check if coord_func works\n actual = ds.coarsen(time=2, x=3, boundary=\"trim\", coord_func={\"time\": \"max\"}).max()\n assert_equal(actual[\"z1\"], ds[\"z1\"].coarsen(x=3, boundary=\"trim\").max())\n assert_equal(actual[\"time\"], ds[\"time\"].coarsen(time=2, boundary=\"trim\").max())\n\n # raise if exact\n with pytest.raises(ValueError):\n ds.coarsen(x=3).mean()\n # should be no error\n ds.isel(x=slice(0, 3 * (len(ds[\"x\"]) // 3))).coarsen(x=3).mean()\n\n # working test with pd.time\n da = xr.DataArray(\n np.linspace(0, 365, num=364),\n dims=\"time\",\n coords={\"time\": pd.date_range(\"15/12/1999\", periods=364)},\n )\n actual = da.coarsen(time=2).mean()\n\n\n@requires_cftime\ndef test_coarsen_coords_cftime():\n times = xr.cftime_range(\"2000\", periods=6)\n da = xr.DataArray(range(6), [(\"time\", times)])\n actual = da.coarsen(time=3).mean()\n expected_times = xr.cftime_range(\"2000-01-02\", freq=\"3D\", periods=2)\n np.testing.assert_array_equal(actual.time, expected_times)\n\n\ndef test_coarsen_keep_attrs():\n _attrs = {\"units\": \"test\", \"long_name\": \"testing\"}\n\n var1 = np.linspace(10, 15, 100)\n var2 = np.linspace(5, 10, 100)\n coords = np.linspace(1, 10, 100)\n\n ds = Dataset(\n data_vars={\"var1\": (\"coord\", var1), \"var2\": (\"coord\", var2)},\n coords={\"coord\": coords},\n attrs=_attrs,\n )\n\n ds2 = ds.copy(deep=True)\n\n # Test dropped attrs\n dat = ds.coarsen(coord=5).mean()\n assert dat.attrs == {}\n\n # Test kept attrs using dataset keyword\n dat = ds.coarsen(coord=5, keep_attrs=True).mean()\n assert dat.attrs == _attrs\n\n # Test kept attrs using global option\n with set_options(keep_attrs=True):\n dat = ds.coarsen(coord=5).mean()\n assert dat.attrs == _attrs\n\n # Test kept attrs in original object\n xr.testing.assert_identical(ds, ds2)\n\n\ndef test_rolling_keep_attrs():\n _attrs = {\"units\": \"test\", \"long_name\": \"testing\"}\n\n var1 = np.linspace(10, 15, 100)\n var2 = np.linspace(5, 10, 100)\n coords = np.linspace(1, 10, 100)\n\n ds = Dataset(\n data_vars={\"var1\": (\"coord\", var1), \"var2\": (\"coord\", var2)},\n coords={\"coord\": coords},\n attrs=_attrs,\n )\n\n # Test dropped attrs\n dat = ds.rolling(dim={\"coord\": 5}, min_periods=None, center=False).mean()\n assert dat.attrs == {}\n\n # Test kept attrs using dataset keyword\n dat = ds.rolling(\n dim={\"coord\": 5}, min_periods=None, center=False, keep_attrs=True\n ).mean()\n assert dat.attrs == _attrs\n\n # Test kept attrs using global option\n with set_options(keep_attrs=True):\n dat = ds.rolling(dim={\"coord\": 5}, min_periods=None, center=False).mean()\n assert dat.attrs == _attrs\n\n\ndef test_rolling_properties(ds):\n # catching invalid args\n with pytest.raises(ValueError, match=\"window must be > 0\"):\n ds.rolling(time=-2)\n with pytest.raises(ValueError, match=\"min_periods must be greater than zero\"):\n ds.rolling(time=2, min_periods=0)\n with pytest.raises(KeyError, match=\"time2\"):\n ds.rolling(time2=2)\n\n\[email protected](\"name\", (\"sum\", \"mean\", \"std\", \"var\", \"min\", \"max\", \"median\"))\[email protected](\"center\", (True, 
False, None))\[email protected](\"min_periods\", (1, None))\[email protected](\"key\", (\"z1\", \"z2\"))\ndef test_rolling_wrapped_bottleneck(ds, name, center, min_periods, key):\n bn = pytest.importorskip(\"bottleneck\", minversion=\"1.1\")\n\n # Test all bottleneck functions\n rolling_obj = ds.rolling(time=7, min_periods=min_periods)\n\n func_name = f\"move_{name}\"\n actual = getattr(rolling_obj, name)()\n if key == \"z1\": # z1 does not depend on 'Time' axis. Stored as it is.\n expected = ds[key]\n elif key == \"z2\":\n expected = getattr(bn, func_name)(\n ds[key].values, window=7, axis=0, min_count=min_periods\n )\n else:\n raise ValueError\n assert_array_equal(actual[key].values, expected)\n\n # Test center\n rolling_obj = ds.rolling(time=7, center=center)\n actual = getattr(rolling_obj, name)()[\"time\"]\n assert_equal(actual, ds[\"time\"])\n\n\n@requires_numbagg\ndef test_rolling_exp(ds):\n\n result = ds.rolling_exp(time=10, window_type=\"span\").mean()\n assert isinstance(result, Dataset)\n\n\[email protected](\"center\", (True, False))\[email protected](\"min_periods\", (None, 1, 2, 3))\[email protected](\"window\", (1, 2, 3, 4))\ndef test_rolling_pandas_compat(center, window, min_periods):\n df = pd.DataFrame(\n {\n \"x\": np.random.randn(20),\n \"y\": np.random.randn(20),\n \"time\": np.linspace(0, 1, 20),\n }\n )\n ds = Dataset.from_dataframe(df)\n\n if min_periods is not None and window < min_periods:\n min_periods = window\n\n df_rolling = df.rolling(window, center=center, min_periods=min_periods).mean()\n ds_rolling = ds.rolling(index=window, center=center, min_periods=min_periods).mean()\n\n np.testing.assert_allclose(df_rolling[\"x\"].values, ds_rolling[\"x\"].values)\n np.testing.assert_allclose(df_rolling.index, ds_rolling[\"index\"])\n\n\[email protected](\"center\", (True, False))\[email protected](\"window\", (1, 2, 3, 4))\ndef test_rolling_construct(center, window):\n df = pd.DataFrame(\n {\n \"x\": np.random.randn(20),\n \"y\": np.random.randn(20),\n \"time\": np.linspace(0, 1, 20),\n }\n )\n\n ds = Dataset.from_dataframe(df)\n df_rolling = df.rolling(window, center=center, min_periods=1).mean()\n ds_rolling = ds.rolling(index=window, center=center)\n\n ds_rolling_mean = ds_rolling.construct(\"window\").mean(\"window\")\n np.testing.assert_allclose(df_rolling[\"x\"].values, ds_rolling_mean[\"x\"].values)\n np.testing.assert_allclose(df_rolling.index, ds_rolling_mean[\"index\"])\n\n # with stride\n ds_rolling_mean = ds_rolling.construct(\"window\", stride=2).mean(\"window\")\n np.testing.assert_allclose(df_rolling[\"x\"][::2].values, ds_rolling_mean[\"x\"].values)\n np.testing.assert_allclose(df_rolling.index[::2], ds_rolling_mean[\"index\"])\n # with fill_value\n ds_rolling_mean = ds_rolling.construct(\"window\", stride=2, fill_value=0.0).mean(\n \"window\"\n )\n assert (ds_rolling_mean.isnull().sum() == 0).to_array(dim=\"vars\").all()\n assert (ds_rolling_mean[\"x\"] == 0.0).sum() >= 0\n\n\[email protected]\[email protected](\"ds\", (1, 2), indirect=True)\[email protected](\"center\", (True, False))\[email protected](\"min_periods\", (None, 1, 2, 3))\[email protected](\"window\", (1, 2, 3, 4))\[email protected](\"name\", (\"sum\", \"mean\", \"std\", \"var\", \"min\", \"max\", \"median\"))\ndef test_rolling_reduce(ds, center, min_periods, window, name):\n\n if min_periods is not None and window < min_periods:\n min_periods = window\n\n if name == \"std\" and window == 1:\n pytest.skip(\"std with window == 1 is unstable in bottleneck\")\n\n rolling_obj = 
ds.rolling(time=window, center=center, min_periods=min_periods)\n\n # add nan prefix to numpy methods to get similar behavior as bottleneck\n actual = rolling_obj.reduce(getattr(np, \"nan%s\" % name))\n expected = getattr(rolling_obj, name)()\n assert_allclose(actual, expected)\n assert ds.dims == actual.dims\n # make sure the order of data_var are not changed.\n assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())\n\n # Make sure the dimension order is restored\n for key, src_var in ds.data_vars.items():\n assert src_var.dims == actual[key].dims\n\n\[email protected](\"ds\", (2,), indirect=True)\[email protected](\"center\", (True, False))\[email protected](\"min_periods\", (None, 1))\[email protected](\"name\", (\"sum\", \"max\"))\[email protected](\"dask\", (True, False))\ndef test_ndrolling_reduce(ds, center, min_periods, name, dask):\n if dask and has_dask:\n ds = ds.chunk({\"x\": 4})\n\n rolling_obj = ds.rolling(time=4, x=3, center=center, min_periods=min_periods)\n\n actual = getattr(rolling_obj, name)()\n expected = getattr(\n getattr(\n ds.rolling(time=4, center=center, min_periods=min_periods), name\n )().rolling(x=3, center=center, min_periods=min_periods),\n name,\n )()\n assert_allclose(actual, expected)\n assert actual.dims == expected.dims\n\n # Do it in the opposite order\n expected = getattr(\n getattr(\n ds.rolling(x=3, center=center, min_periods=min_periods), name\n )().rolling(time=4, center=center, min_periods=min_periods),\n name,\n )()\n\n assert_allclose(actual, expected)\n assert actual.dims == expected.dims\n\n\[email protected](\"center\", (True, False, (True, False)))\[email protected](\"fill_value\", (np.nan, 0.0))\[email protected](\"dask\", (True, False))\ndef test_ndrolling_construct(center, fill_value, dask):\n da = DataArray(\n np.arange(5 * 6 * 7).reshape(5, 6, 7).astype(float),\n dims=[\"x\", \"y\", \"z\"],\n coords={\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"], \"y\": np.arange(6)},\n )\n ds = xr.Dataset({\"da\": da})\n if dask and has_dask:\n ds = ds.chunk({\"x\": 4})\n\n actual = ds.rolling(x=3, z=2, center=center).construct(\n x=\"x1\", z=\"z1\", fill_value=fill_value\n )\n if not isinstance(center, tuple):\n center = (center, center)\n expected = (\n ds.rolling(x=3, center=center[0])\n .construct(x=\"x1\", fill_value=fill_value)\n .rolling(z=2, center=center[1])\n .construct(z=\"z1\", fill_value=fill_value)\n )\n assert_allclose(actual, expected)\n\n\ndef test_raise_no_warning_for_nan_in_binary_ops():\n with pytest.warns(None) as record:\n Dataset(data_vars={\"x\": (\"y\", [1, 2, np.NaN])}) > 0\n assert len(record) == 0\n\n\[email protected](\"error\")\[email protected](\"ds\", (2,), indirect=True)\ndef test_raise_no_warning_assert_close(ds):\n assert_allclose(ds, ds)\n\n\[email protected](reason=\"See https://github.com/pydata/xarray/pull/4369 or docstring\")\[email protected](\"error\")\[email protected](\"ds\", (2,), indirect=True)\[email protected](\"name\", (\"mean\", \"max\"))\ndef test_raise_no_warning_dask_rolling_assert_close(ds, name):\n \"\"\"\n This is a puzzle — I can't easily find the source of the warning. It\n requires `assert_allclose` to be run, for the `ds` param to be 2, and is\n different for `mean` and `max`. 
`sum` raises no warning.\n \"\"\"\n\n ds = ds.chunk({\"x\": 4})\n\n rolling_obj = ds.rolling(time=4, x=3)\n\n actual = getattr(rolling_obj, name)()\n expected = getattr(getattr(ds.rolling(time=4), name)().rolling(x=3), name)()\n assert_allclose(actual, expected)\n\n\[email protected](\"dask\", [True, False])\[email protected](\"edge_order\", [1, 2])\ndef test_differentiate(dask, edge_order):\n rs = np.random.RandomState(42)\n coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]\n\n da = xr.DataArray(\n rs.randn(8, 6),\n dims=[\"x\", \"y\"],\n coords={\"x\": coord, \"z\": 3, \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6))},\n )\n if dask and has_dask:\n da = da.chunk({\"x\": 4})\n\n ds = xr.Dataset({\"var\": da})\n\n # along x\n actual = da.differentiate(\"x\", edge_order)\n expected_x = xr.DataArray(\n np.gradient(da, da[\"x\"], axis=0, edge_order=edge_order),\n dims=da.dims,\n coords=da.coords,\n )\n assert_equal(expected_x, actual)\n assert_equal(\n ds[\"var\"].differentiate(\"x\", edge_order=edge_order),\n ds.differentiate(\"x\", edge_order=edge_order)[\"var\"],\n )\n # coordinate should not change\n assert_equal(da[\"x\"], actual[\"x\"])\n\n # along y\n actual = da.differentiate(\"y\", edge_order)\n expected_y = xr.DataArray(\n np.gradient(da, da[\"y\"], axis=1, edge_order=edge_order),\n dims=da.dims,\n coords=da.coords,\n )\n assert_equal(expected_y, actual)\n assert_equal(actual, ds.differentiate(\"y\", edge_order=edge_order)[\"var\"])\n assert_equal(\n ds[\"var\"].differentiate(\"y\", edge_order=edge_order),\n ds.differentiate(\"y\", edge_order=edge_order)[\"var\"],\n )\n\n with pytest.raises(ValueError):\n da.differentiate(\"x2d\")\n\n\[email protected](\"dask\", [True, False])\ndef test_differentiate_datetime(dask):\n rs = np.random.RandomState(42)\n coord = np.array(\n [\n \"2004-07-13\",\n \"2006-01-13\",\n \"2010-08-13\",\n \"2010-09-13\",\n \"2010-10-11\",\n \"2010-12-13\",\n \"2011-02-13\",\n \"2012-08-13\",\n ],\n dtype=\"datetime64\",\n )\n\n da = xr.DataArray(\n rs.randn(8, 6),\n dims=[\"x\", \"y\"],\n coords={\"x\": coord, \"z\": 3, \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6))},\n )\n if dask and has_dask:\n da = da.chunk({\"x\": 4})\n\n # along x\n actual = da.differentiate(\"x\", edge_order=1, datetime_unit=\"D\")\n expected_x = xr.DataArray(\n np.gradient(\n da, da[\"x\"].variable._to_numeric(datetime_unit=\"D\"), axis=0, edge_order=1\n ),\n dims=da.dims,\n coords=da.coords,\n )\n assert_equal(expected_x, actual)\n\n actual2 = da.differentiate(\"x\", edge_order=1, datetime_unit=\"h\")\n assert np.allclose(actual, actual2 * 24)\n\n # for datetime variable\n actual = da[\"x\"].differentiate(\"x\", edge_order=1, datetime_unit=\"D\")\n assert np.allclose(actual, 1.0)\n\n # with different date unit\n da = xr.DataArray(coord.astype(\"datetime64[ms]\"), dims=[\"x\"], coords={\"x\": coord})\n actual = da.differentiate(\"x\", edge_order=1)\n assert np.allclose(actual, 1.0)\n\n\[email protected](not has_cftime, reason=\"Test requires cftime.\")\[email protected](\"dask\", [True, False])\ndef test_differentiate_cftime(dask):\n rs = np.random.RandomState(42)\n coord = xr.cftime_range(\"2000\", periods=8, freq=\"2M\")\n\n da = xr.DataArray(\n rs.randn(8, 6),\n coords={\"time\": coord, \"z\": 3, \"t2d\": ((\"time\", \"y\"), rs.randn(8, 6))},\n dims=[\"time\", \"y\"],\n )\n\n if dask and has_dask:\n da = da.chunk({\"time\": 4})\n\n actual = da.differentiate(\"time\", edge_order=1, datetime_unit=\"D\")\n expected_data = np.gradient(\n da, da[\"time\"].variable._to_numeric(datetime_unit=\"D\"), 
axis=0, edge_order=1\n )\n expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims)\n assert_equal(expected, actual)\n\n actual2 = da.differentiate(\"time\", edge_order=1, datetime_unit=\"h\")\n assert_allclose(actual, actual2 * 24)\n\n # Test the differentiation of datetimes themselves\n actual = da[\"time\"].differentiate(\"time\", edge_order=1, datetime_unit=\"D\")\n assert_allclose(actual, xr.ones_like(da[\"time\"]).astype(float))\n\n\[email protected](\"dask\", [True, False])\ndef test_integrate(dask):\n rs = np.random.RandomState(42)\n coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]\n\n da = xr.DataArray(\n rs.randn(8, 6),\n dims=[\"x\", \"y\"],\n coords={\n \"x\": coord,\n \"x2\": ((\"x\",), rs.randn(8)),\n \"z\": 3,\n \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6)),\n },\n )\n if dask and has_dask:\n da = da.chunk({\"x\": 4})\n\n ds = xr.Dataset({\"var\": da})\n\n # along x\n actual = da.integrate(\"x\")\n # coordinate that contains x should be dropped.\n expected_x = xr.DataArray(\n np.trapz(da.compute(), da[\"x\"], axis=0),\n dims=[\"y\"],\n coords={k: v for k, v in da.coords.items() if \"x\" not in v.dims},\n )\n assert_allclose(expected_x, actual.compute())\n assert_equal(ds[\"var\"].integrate(\"x\"), ds.integrate(\"x\")[\"var\"])\n\n # make sure result is also a dask array (if the source is dask array)\n assert isinstance(actual.data, type(da.data))\n\n # along y\n actual = da.integrate(\"y\")\n expected_y = xr.DataArray(\n np.trapz(da, da[\"y\"], axis=1),\n dims=[\"x\"],\n coords={k: v for k, v in da.coords.items() if \"y\" not in v.dims},\n )\n assert_allclose(expected_y, actual.compute())\n assert_equal(actual, ds.integrate(\"y\")[\"var\"])\n assert_equal(ds[\"var\"].integrate(\"y\"), ds.integrate(\"y\")[\"var\"])\n\n # along x and y\n actual = da.integrate((\"y\", \"x\"))\n assert actual.ndim == 0\n\n with pytest.raises(ValueError):\n da.integrate(\"x2d\")\n\n\[email protected](\"dask\", [True, False])\[email protected](\"which_datetime\", [\"np\", \"cftime\"])\ndef test_trapz_datetime(dask, which_datetime):\n rs = np.random.RandomState(42)\n if which_datetime == \"np\":\n coord = np.array(\n [\n \"2004-07-13\",\n \"2006-01-13\",\n \"2010-08-13\",\n \"2010-09-13\",\n \"2010-10-11\",\n \"2010-12-13\",\n \"2011-02-13\",\n \"2012-08-13\",\n ],\n dtype=\"datetime64\",\n )\n else:\n if not has_cftime:\n pytest.skip(\"Test requires cftime.\")\n coord = xr.cftime_range(\"2000\", periods=8, freq=\"2D\")\n\n da = xr.DataArray(\n rs.randn(8, 6),\n coords={\"time\": coord, \"z\": 3, \"t2d\": ((\"time\", \"y\"), rs.randn(8, 6))},\n dims=[\"time\", \"y\"],\n )\n\n if dask and has_dask:\n da = da.chunk({\"time\": 4})\n\n actual = da.integrate(\"time\", datetime_unit=\"D\")\n expected_data = np.trapz(\n da.data,\n duck_array_ops.datetime_to_numeric(da[\"time\"].data, datetime_unit=\"D\"),\n axis=0,\n )\n expected = xr.DataArray(\n expected_data,\n dims=[\"y\"],\n coords={k: v for k, v in da.coords.items() if \"time\" not in v.dims},\n )\n assert_allclose(expected, actual.compute())\n\n # make sure result is also a dask array (if the source is dask array)\n assert isinstance(actual.data, type(da.data))\n\n actual2 = da.integrate(\"time\", datetime_unit=\"h\")\n assert_allclose(actual, actual2 / 24.0)\n\n\ndef test_no_dict():\n d = Dataset()\n with pytest.raises(AttributeError):\n d.__dict__\n\n\ndef test_subclass_slots():\n \"\"\"Test that Dataset subclasses must explicitly define ``__slots__``.\n\n .. 
note::\n As of 0.13.0, this is actually mitigated into a FutureWarning for any class\n defined outside of the xarray package.\n \"\"\"\n with pytest.raises(AttributeError) as e:\n\n class MyDS(Dataset):\n pass\n\n assert str(e.value) == \"MyDS must explicitly define __slots__\"\n\n\ndef test_weakref():\n \"\"\"Classes with __slots__ are incompatible with the weakref module unless they\n explicitly state __weakref__ among their slots\n \"\"\"\n from weakref import ref\n\n ds = Dataset()\n r = ref(ds)\n assert r() is ds\n\n\ndef test_deepcopy_obj_array():\n x0 = Dataset(dict(foo=DataArray(np.array([object()]))))\n x1 = deepcopy(x0)\n assert x0[\"foo\"].values[0] is not x1[\"foo\"].values[0]\n" ]
[ [ "pandas.Series", "numpy.linspace", "numpy.asarray", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "numpy.random.randn", "numpy.mean", "numpy.trapz", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.allclose", "pandas.CategoricalDtype", "numpy.arange", "pandas.Index", "numpy.sin", "numpy.diff", "numpy.float32", "pandas.testing.assert_index_equal", "numpy.zeros", "pandas.MultiIndex", "numpy.isnan", "pandas.Timedelta", "numpy.int64", "pandas.MultiIndex.from_product", "numpy.random.rand", "numpy.testing.assert_allclose", "pandas.date_range", "numpy.array", "numpy.random.RandomState", "numpy.meshgrid", "pandas.CategoricalIndex", "numpy.random.random", "pandas.period_range", "numpy.gradient", "pandas.MultiIndex.from_arrays", "numpy.datetime64", "numpy.testing.assert_array_equal", "numpy.ones", "numpy.float64", "numpy.random.uniform", "pandas.Timestamp" ] ]
tianrui/coremltools
[ "f662dea15e3207bd02f44b32d8e35c2893c954a1" ]
[ "coremltools/test/neural_network/test_numpy_nn_layers.py" ]
[ "from __future__ import print_function\n\nimport itertools\nimport math\nimport os\nimport random\nimport shutil\nimport tempfile\nimport unittest\nimport uuid\nimport pytest\nfrom packaging import version\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nimport coremltools\nimport coremltools.models.datatypes as datatypes\nfrom coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION\nfrom coremltools.models import neural_network as neural_network\nfrom coremltools.models.neural_network import flexible_shape_utils\nfrom coremltools.models.utils import macos_version, is_macos\n\nnp.random.seed(10)\n\nMIN_MACOS_VERSION_REQUIRED = (10, 13)\nLAYERS_10_15_MACOS_VERSION = (10, 15)\n\n\ndef _get_unary_model_spec(x, mode, alpha=1.0):\n input_dim = x.shape\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*input_dim))]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n\n builder.add_unary(name='unary', input_name='data',\n output_name='output', mode=mode, alpha=alpha)\n return builder.spec\n\n\nclass CorrectnessTest(unittest.TestCase):\n def runTest(self):\n pass\n\n def _compare_shapes(self, np_preds, coreml_preds):\n return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape\n\n def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):\n if shape:\n return coreml_preds.shape == shape\n else:\n # check if shape has 0 valued dimension\n if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:\n return True\n return coreml_preds.shape == np_preds.shape\n\n def _compare_predictions(self, np_preds, coreml_preds, delta=.01):\n np_preds = np_preds.flatten()\n coreml_preds = coreml_preds.flatten()\n for i in range(len(np_preds)):\n max_den = max(1.0, np_preds[i], coreml_preds[i])\n if np.abs(\n np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:\n return False\n return True\n\n @staticmethod\n def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):\n \"\"\"\n This utility function is used for validate random distributions layers.\n It validates the first 10 moments of prediction and expected values.\n \"\"\"\n\n def get_moment(data, k):\n return np.mean(np.power(data - np.mean(data), k))\n\n if isinstance(model, str):\n model = coremltools.models.MLModel(model)\n\n model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)\n prediction = model.predict(inputs, useCPUOnly=use_cpu_only)\n\n for output_name in expected:\n np_preds = expected[output_name]\n coreml_preds = prediction[output_name]\n\n np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]\n coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]\n\n np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)\n\n # override expected values to allow element-wise compares\n for output_name in expected:\n expected[output_name] = prediction[output_name]\n\n def _test_model(self,\n model,\n input,\n expected,\n model_precision=_MLMODEL_FULL_PRECISION,\n useCPUOnly=False,\n output_name_shape_dict={},\n validate_shapes_only=False):\n\n model_dir = None\n # if we're given a path to a model\n if isinstance(model, str):\n model = coremltools.models.MLModel(model)\n\n # If we're passed in a specification, save out the model\n # and then load it back up\n elif isinstance(model, coremltools.proto.Model_pb2.Model):\n model_dir = tempfile.mkdtemp()\n model_name = str(uuid.uuid4()) + '.mlmodel'\n 
model_path = os.path.join(model_dir, model_name)\n coremltools.utils.save_spec(model, model_path)\n model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)\n\n # If we want to test the half precision case\n if model_precision == _MLMODEL_HALF_PRECISION:\n model = coremltools.utils.convert_neural_network_weights_to_fp16(\n model)\n\n try:\n prediction = model.predict(input, useCPUOnly=useCPUOnly)\n for output_name in expected:\n if self.__class__.__name__ == \"SimpleTest\":\n assert (self._compare_shapes(expected[output_name],\n prediction[output_name]))\n else:\n if output_name in output_name_shape_dict:\n output_shape = output_name_shape_dict[output_name]\n else:\n output_shape = []\n\n if len(output_shape) == 0 and len(expected[output_name].shape) == 0:\n output_shape = (1,)\n assert (self._compare_nd_shapes(expected[output_name],\n prediction[output_name],\n output_shape))\n\n if not validate_shapes_only:\n assert (self._compare_predictions(expected[output_name],\n prediction[output_name]))\n finally:\n # Remove the temporary directory if we created one\n if model_dir and os.path.exists(model_dir):\n shutil.rmtree(model_dir)\n\n\[email protected](not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,\n 'macOS 10.13+ is required. Skipping tests.')\nclass SimpleTest(CorrectnessTest):\n\n def test_tiny_upsample_linear_mode(self):\n input_dim = (1, 1, 3) # (C,H,W)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_upsample(name='upsample',\n scaling_factor_h=2, scaling_factor_w=3,\n input_name='data', output_name='output',\n mode='BILINEAR')\n\n input = {\n 'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))\n }\n expected = {\n 'output': np.array(\n [[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],\n [1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]\n ])\n }\n\n self._test_model(builder.spec, input, expected)\n self.assertEquals(len(input_dim), builder._get_rank('output'))\n\n def test_LRN(self):\n input_dim = (1, 3, 3)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*input_dim))]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_lrn(name='lrn', input_name='data', output_name='output',\n alpha=2, beta=3, local_size=1, k=8)\n\n input = {\n 'data': np.ones((1, 3, 3))\n }\n expected = {\n 'output': 1e-3 * np.ones((1, 3, 3))\n }\n\n self._test_model(builder.spec, input, expected)\n self.assertEqual(len(input_dim), builder._get_rank('output'))\n\n def test_MVN(self):\n input_dim = (2, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*input_dim))]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_mvn(name='mvn', input_name='data', output_name='output',\n across_channels=False, normalize_variance=False)\n\n input = {\n 'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))\n }\n expected = {\n 'output': np.reshape(np.arange(8) - np.array(\n [1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))\n }\n\n self._test_model(builder.spec, input, expected)\n\n def test_L2_normalize(self):\n input_dim = (1, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*input_dim))]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n 
builder.add_l2_normalize(name='mvn', input_name='data',\n output_name='output')\n\n input = {\n 'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n }\n expected = {\n 'output': np.reshape(np.arange(4, dtype=np.float32),\n (1, 2, 2)) / np.sqrt(14)\n }\n\n self._test_model(builder.spec, input, expected)\n\n def test_unary_sqrt(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': np.sqrt(x)}\n spec = _get_unary_model_spec(x, 'sqrt')\n self._test_model(spec, input, expected)\n\n def test_unary_rsqrt(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': 1 / np.sqrt(x)}\n spec = _get_unary_model_spec(x, 'rsqrt')\n self._test_model(spec, input, expected)\n\n def test_unary_inverse(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': 1 / x}\n spec = _get_unary_model_spec(x, 'inverse')\n self._test_model(spec, input, expected)\n\n def test_unary_power(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': x ** 3}\n spec = _get_unary_model_spec(x, 'power', 3)\n self._test_model(spec, input, expected)\n\n def test_unary_exp(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': np.exp(x)}\n spec = _get_unary_model_spec(x, 'exp')\n self._test_model(spec, input, expected)\n\n def test_unary_log(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': np.log(x)}\n spec = _get_unary_model_spec(x, 'log')\n self._test_model(spec, input, expected)\n\n def test_unary_abs(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': np.abs(x)}\n spec = _get_unary_model_spec(x, 'abs')\n self._test_model(spec, input, expected)\n\n def test_unary_threshold(self):\n x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': np.maximum(x, 2)}\n spec = _get_unary_model_spec(x, 'threshold', 2)\n self._test_model(spec, input, expected)\n\n def test_split(self):\n input_dim = (9, 2, 2)\n x = np.random.rand(*input_dim)\n\n input_features = [('data', datatypes.Array(*input_dim))]\n output_names = []\n output_features = []\n for i in range(3):\n out = 'out_' + str(i)\n output_names.append(out)\n output_features.append((out, None))\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_split(name='split', input_name='data',\n output_names=output_names)\n\n input = {'data': x}\n expected = {\n 'out_0': x[0: 3, :, :],\n 'out_1': x[3: 6, :, :],\n 'out_2': x[6: 9, :, :]\n }\n\n self._test_model(builder.spec, input, expected)\n for output_ in output_names:\n self.assertEqual(len(input_dim), builder._get_rank(output_))\n \n def test_scale_constant(self):\n input_dim = (1, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_scale(name='scale', W=5, b=45, has_bias=True,\n input_name='data', output_name='output')\n\n x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': 5 * x + 45}\n\n self._test_model(builder.spec, input, expected)\n\n def test_scale_matrix(self):\n input_dim = (1, 2, 2)\n input_features = [('data', 
datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n W = np.reshape(np.arange(5, 9), (1, 2, 2))\n\n builder.add_scale(name='scale', W=W, b=None, has_bias=False,\n input_name='data', output_name='output',\n shape_scale=[1, 2, 2])\n\n x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': W * x}\n\n self._test_model(builder.spec, input, expected)\n\n def test_bias_constant(self):\n input_dim = (1, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_bias(name='bias', b=45, input_name='data',\n output_name='output')\n\n x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': x + 45}\n\n self._test_model(builder.spec, input, expected)\n\n def test_bias_matrix(self):\n input_dim = (1, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n b = np.reshape(np.arange(5, 9), (1, 2, 2))\n\n builder.add_bias(name='bias', b=b, input_name='data',\n output_name='output',\n shape_bias=[1, 2, 2])\n\n x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': x + b}\n\n self._test_model(builder.spec, input, expected)\n\n def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):\n input_dim = (1, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n b = np.reshape(np.arange(5, 9), (1, 2, 2))\n\n builder.add_load_constant(name='load_constant', output_name='bias',\n constant_value=b, shape=[1, 2, 2])\n builder.add_elementwise(name='add', input_names=['data', 'bias'],\n output_name='output', mode='ADD')\n\n x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': x + b}\n\n self._test_model(builder.spec, input, expected, model_precision)\n self.assertEqual(len(input_dim), builder._get_rank('output'))\n\n def test_load_constant_half_precision(self):\n self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)\n\n def test_min(self):\n input_dim = (1, 2, 2)\n input_features = [('data_0', datatypes.Array(*input_dim)),\n ('data_1', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n\n builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],\n output_name='output', mode='MIN')\n x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))\n\n input = {'data_0': x1, 'data_1': x2}\n expected = {'output': np.minimum(x1, x2)}\n\n self._test_model(builder.spec, input, expected)\n self.assertEqual(len(input_dim), builder._get_rank('output'))\n\n def test_conv_same_padding(self):\n input_dim = (10, 15, 15)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n W = np.random.rand(3, 3, 10, 20)\n\n builder.add_convolution(name='conv', kernel_channels=10,\n output_channels=20,\n height=3, width=3, 
stride_height=2,\n stride_width=2,\n border_mode='same', groups=1,\n W=W, b=None, has_bias=False,\n input_name='data', output_name='output',\n same_padding_asymmetry_mode='TOP_LEFT_HEAVY')\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': np.random.rand(20, 8, 8)}\n\n self._test_model(\n builder.spec, input, expected, validate_shapes_only=True)\n self.assertEqual(len(input_dim), builder._get_rank('output'))\n\n def test_deconv_valid_padding(self):\n input_dim = (10, 15, 15)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n W = np.random.rand(3, 3, 10, 20)\n\n builder.add_convolution(name='deconv', kernel_channels=10,\n output_channels=20,\n height=3, width=3, stride_height=2,\n stride_width=2,\n border_mode='valid', groups=1,\n W=W, b=None, has_bias=False,\n is_deconv=True,\n input_name='data', output_name='output',\n padding_top=2, padding_bottom=3,\n padding_left=2, padding_right=3)\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': np.random.rand(20, 26, 26)}\n\n self._test_model(\n builder.spec, input, expected, validate_shapes_only=True)\n\n def test_deconv_non_unit_groups(self):\n input_dim = (16, 15, 15)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features)\n\n W = np.random.rand(3, 3, 16, 5)\n builder.add_convolution(name='deconv', kernel_channels=16,\n output_channels=20,\n height=3, width=3, stride_height=2,\n stride_width=2,\n border_mode='valid', groups=4,\n W=W, b=None, has_bias=False,\n is_deconv=True,\n input_name='data', output_name='output',\n padding_top=2, padding_bottom=3,\n padding_left=2, padding_right=3)\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': np.random.rand(20, 26, 26)}\n\n self._test_model(\n builder.spec, input, expected, validate_shapes_only=True)\n\n def test_linear_activation(self):\n input_dim = (10, 15, 15)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_activation(name='activation',\n non_linearity='LINEAR',\n input_name='data',\n output_name='output', params=[34.0, 67.0])\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': 34.0 * x + 67.0}\n\n self._test_model(builder.spec, input, expected)\n\n def test_padding_constant(self):\n input_dim = (1, 2, 3)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features)\n builder.add_padding(name='pad',\n left=1, right=0, top=2, bottom=0,\n value=-1,\n input_name='data',\n output_name='output')\n\n x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(\n np.float32)\n input = {'data': x}\n y = np.reshape(\n np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],\n [-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)\n expected = {'output': y}\n\n self._test_model(builder.spec, input, expected)\n\n def test_padding_replication(self):\n input_dim = (1, 2, 3)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n 
builder.add_padding(name='pad',\n left=1, top=2,\n input_name='data',\n output_name='output', padding_type='replication')\n\n x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(\n np.float32)\n input = {'data': x}\n y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],\n [4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)\n expected = {'output': y}\n\n self._test_model(builder.spec, input, expected)\n\n def test_reshape_target_shape_3(self):\n input_dim = (1, 2, 5) # (C,H,W)\n target_dim = (10, 1, 1)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_reshape(name='reshape', input_name='data',\n output_name='output', target_shape=target_dim,\n mode=0)\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': np.reshape(x, (10, 1, 1))}\n\n self._test_model(builder.spec, input, expected)\n self.assertEqual(len(target_dim), builder._get_rank('output'))\n\n def test_reshape_target_shape_4(self):\n input_dim = (1, 2, 5) # (C,H,W)\n target_dim = (1, 10, 1, 1)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_reshape(name='reshape', input_name='data',\n output_name='output', target_shape=target_dim,\n mode=0)\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': np.reshape(x, (1, 10, 1, 1))}\n\n self._test_model(builder.spec, input, expected)\n self.assertEqual(len(target_dim), builder._get_rank('output')) \n\n def test_bias_matrix_cpu(self):\n input_dim = (1, 2, 2)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n b = np.reshape(np.arange(5, 9), (1, 2, 2))\n\n builder.add_bias(name='bias', b=b, input_name='data',\n output_name='output',\n shape_bias=[1, 2, 2])\n\n x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))\n input = {'data': x}\n expected = {'output': x + b}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n\n def test_linear_activation_cpu(self):\n input_dim = (10, 15, 15)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_activation(name='activation',\n non_linearity='LINEAR',\n input_name='data',\n output_name='output', params=[34.0, 67.0])\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': 34.0 * x + 67.0}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n\n\[email protected](not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,\n 'macOS 10.15+ required. 
Skipping tests.')\nclass NewLayersSimpleTest(CorrectnessTest):\n\n def test_shape_flexibility_range(self):\n\n input_features = [('data', datatypes.Array(*(3,4)))]\n builder = neural_network.NeuralNetworkBuilder(input_features,\n [('output', None)], disable_rank5_shape_mapping=True)\n builder.add_sin(name='sin', input_name='data', output_name='output')\n spec = builder.spec\n\n flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',\n lower_bounds=[1,1], upper_bounds=[-1,5])\n\n shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]\n for s in shapes:\n x = np.random.rand(*s)\n expected = {'output': np.sin(x)}\n self._test_model(spec, {'data': x}, expected, useCPUOnly=True)\n\n def test_shape_flexibility_enumeration(self, rank=4):\n default_shape = tuple(np.random.randint(1, 15, size=rank))\n input_features = [('data', datatypes.Array(*default_shape))]\n builder = neural_network.NeuralNetworkBuilder(\n input_features=input_features,\n output_features=[('output', None)],\n disable_rank5_shape_mapping=True)\n builder.add_sin(name='sin', input_name='data', output_name='output')\n spec = builder.spec\n\n shapes = [tuple(np.random.randint(1, 15, size=rank)),\n tuple(np.random.randint(1, 15, size=rank))]\n flexible_shape_utils.add_multiarray_ndshape_enumeration(\n spec, feature_name='data', enumerated_shapes=shapes)\n\n shapes.append(default_shape)\n for s in shapes:\n x = np.random.rand(*s)\n expected = {'output': np.sin(x)}\n self._test_model(spec, {'data': x}, expected, useCPUOnly=True)\n\n def test_shape_flexibility_enumeration_rank3(self):\n self.test_shape_flexibility_enumeration(rank=3)\n\n def test_shape_flexibility_enumeration_rank2(self):\n self.test_shape_flexibility_enumeration(rank=2)\n\n def test_transpose_cpu(self):\n for rank in range(1, 6):\n axes = np.random.permutation(rank)\n axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]\n input_shape = np.random.randint(low=2, high=6, size=rank)\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_transpose(name='TransposeND',\n axes=axes,\n input_name='data',\n output_name='output')\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.transpose(x, axes)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n\n\n def test_dynamic_weight_conv(self):\n\n input_dim = (1, 3, 16, 16)\n # weight layout: (output_channels, kernel_channels, height, width)\n weight_dim = (4, 3, 3, 3)\n output_dim = (1, 4, 14, 14)\n\n kernel_channels = input_dim[0]\n output_channels, kernel_channels, height, width = weight_dim\n \n input_features = [\n ('input', datatypes.Array(*input_dim)),\n ('weight', datatypes.Array(*weight_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features,\n output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_convolution(\n name='two_input_conv_layer',\n kernel_channels=kernel_channels,\n output_channels=output_channels,\n height=height,\n width=width,\n stride_height=1,\n stride_width=1,\n border_mode='valid',\n groups=1,\n W=None,\n b=None,\n has_bias=False,\n input_name=['input', 'weight'],\n output_name='output')\n\n # Assigning everything to ones should cover the execution path\n # and engine failures, but is not a complete check on numerics.\n input_val = np.ones(input_dim)\n 
weight_val = np.ones(weight_dim)\n expected = np.ones(output_dim) * 27\n\n feed_dict = {'input': input_val, 'weight': weight_val}\n expected = {'output': expected}\n\n self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True)\n self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False)\n\n @pytest.mark.xfail\n def test_dynamic_weight_deconv(self):\n # Expect to fail in Core ML 3\n input_dim = (1, 1, 16, 16)\n # weight layout: (output_channels, kernel_channels, height, width)\n weight_dim = (1, 1, 3, 3)\n output_dim = (1, 1, 18, 18)\n output_channels, kernel_channels, height, width = weight_dim\n\n input_features = [\n ('data', datatypes.Array(*input_dim)),\n ('weight', datatypes.Array(*weight_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features,\n output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_convolution(\n name='deconv',\n kernel_channels=kernel_channels,\n output_channels=output_channels,\n height=height,\n width=width,\n stride_height=1,\n stride_width=1,\n border_mode='valid',\n groups=1,\n W=None,\n b=None,\n has_bias=False,\n is_deconv=True,\n input_name=['data', 'weight'],\n output_name='output')\n\n input_val = np.ones(input_dim)\n weight_val = np.ones(weight_dim)\n expected = np.ones(output_dim) * 27\n\n feed_dict = {'data': input_val, 'weight': weight_val}\n expected = {'output': expected}\n\n self._test_model(builder.spec, feed_dict, expected)\n\n def test_batched_mat_mul_cpu(self, cpu_only=True):\n a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),\n (1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]\n b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),\n (1, 4, 3, 2), (2, 1, 2, 4, 5)]\n out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),\n (1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]\n\n for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):\n input_shapes = [a_shape, b_shape]\n input_features = [\n ('A', datatypes.Array(*input_shapes[0])),\n ('B', datatypes.Array(*input_shapes[1]))\n ]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_batched_mat_mul(name='batched_mat_mul',\n input_names=['A', 'B'],\n output_name='output',\n transpose_a=False,\n transpose_b=False)\n\n a = np.random.rand(*input_shapes[0])\n b = np.random.rand(*input_shapes[1])\n input_ = {'A': a, 'B': b}\n expected = {'output': np.array(np.matmul(a, b))}\n shape_dict = {'output': outShape}\n self._test_model(builder.spec, input_, expected, useCPUOnly=cpu_only,\n output_name_shape_dict=shape_dict)\n self.assertEqual(len(outShape), builder._get_rank('output'))\n\n def test_batched_mat_mul_gpu(self):\n self.test_batched_mat_mul_cpu(cpu_only=False)\n\n def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True):\n for transpose_a, transpose_b in itertools.product([True, False],\n [True, False]):\n a_shape = (3, 4)\n b_shape = (4, 5)\n a_shape = a_shape[::-1] if transpose_a else a_shape\n b_shape = b_shape[::-1] if transpose_b else b_shape\n input_shapes = [a_shape, b_shape]\n input_features = [\n ('A', datatypes.Array(*input_shapes[0])),\n ('B', datatypes.Array(*input_shapes[1]))\n ]\n\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n builder.add_batched_mat_mul(\n name='BatchedMatMul', input_names=['A', 'B'],\n 
output_name='output', transpose_a=transpose_a,\n transpose_b=transpose_b\n )\n a = np.random.rand(*input_shapes[0])\n b = np.random.rand(*input_shapes[1])\n inputs = {'A': a, 'B': b}\n a = a.T if transpose_a else a\n b = b.T if transpose_b else b\n expected = {'output': np.matmul(a, b)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_batched_mat_mul_with_transposes_gpu(self):\n self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False)\n\n def test_batched_mat_mul_single_input_cpu(self,\n model_precision=_MLMODEL_FULL_PRECISION,\n cpu_only=True):\n X1 = 11\n X2 = 23\n W = np.random.rand(X1, X2)\n bias = np.random.rand(X2)\n input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),\n (2, 3, 1, 5, X1)]\n for input_shape in input_shapes:\n x = np.random.rand(*input_shape)\n np_out = np.matmul(x, W) + bias\n expected = {'output': np_out}\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_batched_mat_mul(name='batched_mat_mul',\n input_names=['data'],\n output_name='output',\n weight_matrix_rows=X1,\n weight_matrix_columns=X2,\n W=W, bias=bias)\n inputs = {'data': x}\n\n self._test_model(\n builder.spec, inputs, expected,\n model_precision=model_precision, useCPUOnly=cpu_only)\n\n def test_batched_mat_mul_single_input_half_precision_cpu(self):\n self.test_batched_mat_mul_single_input_cpu(\n model_precision=_MLMODEL_HALF_PRECISION,\n cpu_only=True)\n\n def test_batched_mat_mul_single_input_gpu(self):\n self.test_batched_mat_mul_single_input_cpu(model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False)\n\n def test_embedding_nd_cpu(\n self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):\n vocab_size = 10\n embedding_size = 19\n W = np.random.rand(embedding_size, vocab_size)\n input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),\n (2, 3, 1, 5, 1)]\n for input_shape in input_shapes:\n x = np.random.randint(vocab_size, size=input_shape)\n\n np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)\n expected = {'output': np_out}\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_embedding_nd(name='embedding_nd',\n input_name='data',\n output_name='output',\n vocab_size=vocab_size,\n embedding_size=embedding_size,\n W=W)\n\n input = {'data': x.astype(np.float32)}\n\n self._test_model(\n builder.spec, input, expected,\n model_precision=model_precision, useCPUOnly=use_cpu_only)\n\n def test_embedding_nd_half_precision_cpu(self):\n self.test_embedding_nd_cpu(\n model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)\n\n def test_embedding_nd_GPU(self):\n self.test_embedding_nd_cpu(\n model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)\n\n def test_embedding_nd_half_precision_GPU(self):\n self.test_embedding_nd_cpu(\n model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)\n\n def test_softmax_nan_bug_cpu(self):\n input_shape = [2,2]\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n for axis in [0,1]:\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_softmax_nd(name='softmax_nd', input_name='data',\n 
output_name='output', axis=axis)\n\n x = np.array([[0.5, 0.5],[1e8, 1e8]])\n input = {'data': x}\n y = np.exp(x - np.max(x, axis=axis, keepdims=True))\n y = y / np.sum(y, axis=axis, keepdims=True)\n expected = {'output': y}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n\n def test_softmax_nd_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for axis in range(-rank, rank):\n input_shape = np.random.randint(low=2, high=5, size=rank)\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_softmax_nd(name='softmax_nd', input_name='data',\n output_name='output', axis=axis)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n y = np.exp(x - np.max(x, axis=axis, keepdims=True))\n y = y / np.sum(y, axis=axis, keepdims=True)\n expected = {'output': y}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_softmax_nd_gpu(self):\n self.test_softmax_nd_cpu(cpu_only=False)\n\n def test_concat_nd_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for axis in range(-rank, rank):\n n_inputs = np.random.choice(range(2, 5))\n output_shape = np.random.randint(low=2, high=5, size=rank)\n output_shape[axis] = 0\n input_shapes = []\n input_features = []\n input_names = []\n for _ in range(n_inputs):\n input_shapes.append(np.copy(output_shape))\n input_shapes[-1][axis] = np.random.choice(range(2, 8))\n output_shape[axis] += input_shapes[-1][axis]\n for i, input_dim in enumerate(input_shapes):\n input_name = 'input_%s' % str(i)\n input_names.append(input_name)\n input_features.append((input_name, datatypes.Array(*input_dim)))\n\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_concat_nd(name='concat_nd', input_names=input_names,\n output_name='output', axis=axis)\n\n input_tensors = []\n for input_dim in input_shapes:\n input_tensors.append(np.random.rand(*input_dim))\n input = dict(zip(input_names, input_tensors))\n expected = {'output': np.concatenate(input_tensors, axis)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_concat_nd_gpu(self):\n self.test_concat_nd_cpu(cpu_only=False)\n\n def test_fill_like_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n target_shape = np.random.randint(low=2, high=6, size=rank)\n value = float(np.random.rand())\n\n input_features = [('tensor', datatypes.Array(*target_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_fill_like(name='fill_like', input_name='tensor',\n output_name='output', value=value)\n\n tensor = np.random.rand(*target_shape)\n input = {'tensor': tensor}\n expected = {'output': np.zeros(target_shape) + value}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_fill_like_gpu(self):\n self.test_fill_like_cpu(cpu_only=False)\n\n def test_fill_static_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n\n input_features = [('data', datatypes.Array(*shape))]\n value = float(np.random.rand())\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n builder.add_fill_static(name='fill_static', 
output_name='tmp',\n output_shape=list(shape), value=value)\n\n builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')\n\n data = np.random.rand(*shape)\n input = {'data': data}\n expected = {'output': data + value}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(len(shape), builder._get_rank('output'))\n\n def test_fill_static_gpu(self):\n self.test_fill_static_cpu(cpu_only=False)\n\n def test_fill_dynamic_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n value = float(np.random.rand())\n\n input_features = [('shape', datatypes.Array(len(input_shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',\n output_name='output', value=value)\n\n input = {'shape': np.array(input_shape, dtype='float')}\n expected = {'output': np.zeros(input_shape) + value}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(builder._get_rank('output'), -1)\n\n def test_fill_dynamic_gpu(self):\n self.test_fill_dynamic_cpu(cpu_only=False)\n\n def test_broadcast_to_like_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n mask = [np.random.choice([True, False, False]) for _ in range(rank)]\n input_shape = np.where(mask, 1, input_shape)\n\n target_rank = np.random.randint(low=rank, high=6)\n target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)\n else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]\n\n input_features = [('data', datatypes.Array(*input_shape)),\n ('tensor', datatypes.Array(*target_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_broadcast_to_like(name='broadcast_to_like',\n input_names=['data', 'tensor'],\n output_name='output')\n\n data = np.random.rand(*input_shape)\n tensor = np.random.rand(*target_shape)\n inputs = {'data': data, 'tensor': tensor}\n expected = {'output': np.broadcast_to(data, target_shape)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_broadcast_to_like_gpu(self):\n self.test_broadcast_to_like_cpu(cpu_only=False)\n\n def test_broadcast_to_static_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n mask = [np.random.choice([True, False, False]) for _ in range(rank)]\n input_shape = np.where(mask, 1, input_shape)\n\n target_rank = np.random.randint(low=rank, high=6)\n target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)\n else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]\n\n input_features = [('data', datatypes.Array(*input_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_broadcast_to_static(name='broadcast_to_static',\n input_name='data',\n output_name='output',\n output_shape=list(target_shape))\n\n data = np.random.rand(*input_shape)\n input = {'data': data}\n expected = {'output': np.broadcast_to(data, target_shape)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(target_rank, builder._get_rank('output'))\n\n def test_broadcast_to_static_gpu(self):\n 
self.test_broadcast_to_static_cpu(cpu_only=False)\n\n def test_broadcast_to_dynamic_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n mask = [np.random.choice([True, False, False]) for _ in range(rank)]\n input_shape = np.where(mask, 1, input_shape)\n\n target_rank = np.random.randint(low=rank, high=6)\n target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)\n else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]\n\n input_features = [('data', datatypes.Array(*input_shape)),\n ('shape', datatypes.Array(len(target_shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',\n input_names=['data', 'shape'],\n output_name='output')\n\n data = np.random.rand(*input_shape)\n inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}\n expected = {'output': np.broadcast_to(data, target_shape)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(builder._get_rank('output'), -1)\n\n def test_broadcast_to_dynamic_gpu(self):\n self.test_broadcast_to_dynamic_cpu(cpu_only=False)\n\n # Test Rank being set to unknown when one of the input rank is unknown\n # For max rank case\n def test_unknown_rank(self, cpu_only=True):\n\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n mask = [np.random.choice([True, False, False]) for _ in range(rank)]\n input_shape = np.where(mask, 1, input_shape)\n\n target_rank = np.random.randint(low=rank, high=6)\n target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)\n else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]\n\n input_features = [('x', datatypes.Array(*input_shape)),\n ('shape', datatypes.Array(len(target_shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',\n input_names=['x', 'shape'],\n output_name='y')\n\n condition = np.random.randint(0, 2, input_shape).astype(np.float32)\n builder.add_load_constant_nd(name='load_constant_condition',\n output_name='condition',\n constant_value=condition,\n shape=input_shape)\n\n builder.add_where_broadcastable(name='where',\n input_names=['condition', 'x', 'y'],\n output_name='output')\n \n self.assertEqual(builder._get_rank('output'), -1)\n\n\n def test_trigonometry_cpu(self, cpu_only=True):\n\n ops = ['sin', 'cos', 'tan',\n 'asin', 'acos', 'atan',\n 'sinh', 'cosh', 'tanh',\n 'asinh', 'acosh', 'atanh']\n\n for op in ops:\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n x = np.random.rand(*shape)\n\n if op == 'sin':\n builder.add_sin(name=op, input_name='data', output_name='output')\n expected = {'output': np.sin(x)}\n elif op == 'cos':\n builder.add_cos(name=op, input_name='data', output_name='output')\n expected = {'output': np.cos(x)}\n elif op == 'tan':\n builder.add_tan(name=op, input_name='data', output_name='output')\n expected = {'output': np.tan(x)}\n elif op == 'asin':\n builder.add_asin(name=op, input_name='data', output_name='output')\n expected = {'output': np.arcsin(x)}\n 
elif op == 'acos':\n builder.add_acos(name=op, input_name='data', output_name='output')\n expected = {'output': np.arccos(x)}\n elif op == 'atan':\n builder.add_atan(name=op, input_name='data', output_name='output')\n expected = {'output': np.arctan(x)}\n elif op == 'sinh':\n builder.add_sinh(name=op, input_name='data', output_name='output')\n expected = {'output': np.sinh(x)}\n elif op == 'cosh':\n builder.add_cosh(name=op, input_name='data', output_name='output')\n expected = {'output': np.cosh(x)}\n elif op == 'tanh':\n builder.add_tanh(name=op, input_name='data', output_name='output')\n expected = {'output': np.tanh(x)}\n elif op == 'asinh':\n builder.add_asinh(name=op, input_name='data', output_name='output')\n expected = {'output': np.arcsinh(x)}\n elif op == 'acosh':\n x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)\n builder.add_acosh(name=op, input_name='data', output_name='output')\n expected = {'output': np.arccosh(x)}\n elif op == 'atanh':\n builder.add_atanh(name=op, input_name='data', output_name='output')\n expected = {'output': np.arctanh(x)}\n\n self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=cpu_only)\n\n def test_trigonometry_gpu(self):\n self.test_trigonometry_cpu(cpu_only=False)\n\n def test_exp2_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n builder.add_exp2(name='exp2', input_name='data', output_name='output')\n\n x = np.random.rand(*shape)\n input = {'data': x}\n expected = {'output': np.exp2(x)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_exp2_gpu(self):\n self.test_exp2_cpu(cpu_only=False)\n\n def test_elementwise_binary_cpu(self, cpu_only=True):\n input_names = ['A', 'B']\n test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',\n 'less_equal', 'logical_and', 'logical_or', 'logical_xor',\n 'add', 'subtract', 'multiply', 'divide', 'power',\n 'maximum', 'minimum', 'floor_divide', 'mod']\n for test_case in test_cases:\n for _ in range(10):\n rank_a = np.random.randint(low=1, high=6)\n rank_b = np.random.randint(low=1, high=6)\n\n rank_out = max(rank_a, rank_b)\n\n shape_a = np.random.randint(low=2, high=8, size=rank_a)\n shape_b = np.random.randint(low=2, high=8, size=rank_b)\n\n for i in range(-1, -rank_out - 1, -1):\n dims = []\n if -i <= rank_a: dims.append(shape_a[i])\n if -i <= rank_b: dims.append(shape_b[i])\n\n dim = np.random.choice(dims)\n if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])\n if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])\n\n input_shapes = [shape_a, shape_b]\n input_features = [('A', datatypes.Array(*input_shapes[0])),\n ('B', datatypes.Array(*input_shapes[1]))]\n\n builder = neural_network.NeuralNetworkBuilder(input_features, [\n ('output', None)], disable_rank5_shape_mapping=True)\n\n func = getattr(np, test_case)\n if test_case == 'greater':\n builder.add_greater_than(test_case, input_names=input_names,\n output_name='output')\n elif test_case == 'less':\n builder.add_less_than(test_case, input_names=input_names,\n output_name='output')\n elif test_case == 'equal':\n builder.add_equal(test_case, input_names=input_names,\n output_name='output')\n elif test_case == 'not_equal':\n builder.add_not_equal(test_case, input_names=input_names,\n output_name='output')\n elif test_case == 
'greater_equal':\n builder.add_greater_than(test_case, input_names=input_names,\n output_name='output',\n use_greater_than_equal=True)\n elif test_case == 'less_equal':\n builder.add_less_than(test_case, input_names=input_names,\n output_name='output',\n use_less_than_equal=True)\n elif test_case == 'logical_and':\n builder.add_logical(test_case, input_names=input_names,\n output_name='output', mode='AND')\n elif test_case == 'logical_or':\n builder.add_logical(test_case, input_names=input_names,\n output_name='output', mode='OR')\n elif test_case == 'logical_xor':\n builder.add_logical(test_case, input_names=input_names,\n output_name='output', mode='XOR')\n elif test_case == 'add':\n builder.add_add_broadcastable(test_case, input_names=input_names,\n output_name='output')\n elif test_case == 'subtract':\n builder.add_subtract_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'multiply':\n builder.add_multiply_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'divide':\n builder.add_divide_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'power':\n builder.add_pow_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'maximum':\n builder.add_max_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'minimum':\n builder.add_min_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'floor_divide':\n builder.add_floor_div_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n elif test_case == 'mod':\n builder.add_mod_broadcastable(test_case,\n input_names=input_names,\n output_name='output')\n a = np.random.rand(*input_shapes[0])\n b = np.random.rand(*input_shapes[1])\n input = {'A': a, 'B': b}\n expected = {'output': func(a, b, dtype=np.float32)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_elementwise_binary_gpu(self):\n self.test_elementwise_binary_cpu(cpu_only=False)\n\n def test_elementwise_boolean_unary_cpu(self, cpu_only=True):\n input_names = ['input']\n shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),\n (2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]\n test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',\n 'less_equal']\n for test_case in test_cases:\n for shape in shapes:\n input_features = [('input', datatypes.Array(*shape))]\n b = np.random.rand()\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n func = getattr(np, test_case)\n if test_case == 'greater':\n builder.add_greater_than(test_case, input_names=input_names,\n output_name='output', alpha=b)\n elif test_case == 'less':\n builder.add_less_than(test_case, input_names=input_names,\n output_name='output', alpha=b)\n elif test_case == 'equal':\n builder.add_equal(test_case, input_names=input_names,\n output_name='output', alpha=b)\n elif test_case == 'not_equal':\n builder.add_not_equal(test_case, input_names=input_names,\n output_name='output', alpha=b)\n elif test_case == 'greater_equal':\n builder.add_greater_than(test_case, input_names=input_names,\n output_name='output',\n use_greater_than_equal=True,\n alpha=b)\n elif test_case == 'less_equal':\n builder.add_less_than(test_case, input_names=input_names,\n output_name='output',\n use_less_than_equal=True, alpha=b)\n\n a = np.random.rand(*shape)\n 
input = {'input': a}\n expected = {'output': func(a, b, dtype=np.float32)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_elementwise_boolean_unary_gpu(self):\n self.test_elementwise_boolean_unary_cpu(cpu_only=False)\n\n def test_logical_not_cpu(self, cpu_only=True):\n input_names = ['input']\n shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),\n (2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]\n for shape in shapes:\n input_features = [('input', datatypes.Array(*shape))]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n builder.add_logical('logical_not', input_names=input_names,\n output_name='output', mode='NOT')\n\n a = np.random.rand(*shape)\n input = {'input': a}\n expected = {'output': np.logical_not(a)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_logical_not_gpu(self):\n self.test_logical_not_cpu(cpu_only=False)\n\n def test_stack_cpu(self, cpu_only=True):\n for input_rank in range(1, 5):\n for axis in range(-input_rank - 1, input_rank + 1):\n n_inputs = np.random.choice(range(2, 5))\n input_shape = np.random.randint(low=2, high=5, size=input_rank)\n input_features = []\n input_names = []\n for i in range(n_inputs):\n input_name = 'input_%s' % str(i)\n input_names.append(input_name)\n input_features.append(\n (input_name, datatypes.Array(*input_shape)))\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_stack(name='stack', input_names=input_names,\n output_name='output', axis=axis)\n\n input_tensors = []\n for _ in range(n_inputs):\n input_tensors.append(np.random.rand(*input_shape))\n input = dict(zip(input_names, input_tensors))\n expected = {'output': np.stack(input_tensors, axis)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(input_rank + 1, builder._get_rank('output'))\n\n def test_stack_gpu(self):\n self.test_stack_cpu(cpu_only=False)\n\n def test_ceil_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_ceil(name='ceil', input_name='data', output_name='output')\n\n x = np.random.rand(*shape)\n inputs = {'data': x}\n expected = {'output': np.ceil(x)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank, builder._get_rank('output'))\n\n def test_ceil_gpu(self):\n self.test_ceil_cpu(cpu_only=False)\n\n def test_floor_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_floor(name='floor', input_name='data', output_name='output')\n\n x = np.random.rand(*shape)\n inputs = {'data': x}\n expected = {'output': np.floor(x)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_floor_gpu(self):\n self.test_floor_cpu(cpu_only=False)\n\n def test_round_cpu(self, cpu_only=True):\n for 
rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_round(name='round', input_name='data', output_name='output')\n\n x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))\n inputs = {'data': x}\n expected = {'output': np.around(x)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_round_gpu(self):\n self.test_round_cpu(cpu_only=False)\n\n def test_sign_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_sign(name='sign', input_name='data', output_name='output')\n\n x = np.random.choice([-np.random.rand(1), 0.0, np.random.rand(1)],\n tuple(shape)).astype(np.float32)\n inputs = {'data': x}\n expected = {'output': np.sign(x)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_sign_gpu(self):\n self.test_sign_cpu(cpu_only=False)\n\n def test_clip_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=6, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', datatypes.Array(*shape))]\n\n x = np.random.rand(*shape)\n min_value = np.percentile(x, 25)\n max_value = np.percentile(x, 75)\n input = {'data': x}\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n builder.add_clip(name='clip', input_name='data', output_name='output',\n min_value=min_value, max_value=max_value)\n\n expected = {'output': np.clip(x, min_value, max_value)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_clip_gpu(self):\n self.test_clip_cpu(cpu_only=False)\n\n def test_split_nd_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for axis in range(-rank, rank):\n n_outputs = np.random.choice(range(2, 4))\n input_shape = np.random.randint(low=2, high=5, size=rank)\n input_shape[axis] = 0\n output_shapes = []\n output_features = []\n output_names = []\n almost_equal = random.choice([True, False])\n remainder = np.random.choice(\n range(1, n_outputs)) if almost_equal else 0\n value = np.random.choice(range(2, 5))\n for k in range(n_outputs):\n output_shapes.append(np.copy(input_shape))\n output_shapes[-1][\n axis] = value + 1 if k < remainder else value\n input_shape[axis] += output_shapes[-1][axis]\n\n for i in range(n_outputs):\n output_name = 'output_%s' % str(i)\n output_names.append(output_name)\n output_features.append(\n (output_name, None))\n\n input_features = [('data', datatypes.Array(*input_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_split_nd(name='split_nd', input_name='data',\n output_names=output_names, axis=axis,\n num_splits=n_outputs)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = dict(\n zip(\n output_names, np.array_split(x, n_outputs, axis=axis)\n if almost_equal else np.split(x, n_outputs, axis=axis)\n )\n ) # Explicitly trying 
to compare against both versions of numpy split\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n for output_ in output_names:\n self.assertEqual(rank, builder._get_rank(output_))\n\n def test_split_nd_gpu(self):\n self.test_split_nd_cpu(cpu_only=False)\n\n def test_split_nd_with_split_sizes_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for axis in range(-rank, rank):\n n_outputs = np.random.choice(range(2, 4))\n input_shape = np.random.randint(low=2, high=5, size=rank)\n input_shape[axis] = 0\n output_shapes, output_features, output_names = [], [], []\n sections, split_sizes = [], []\n for _ in range(n_outputs):\n output_shapes.append(np.copy(input_shape))\n output_shapes[-1][axis] = np.random.choice(range(2, 5))\n input_shape[axis] += output_shapes[-1][axis]\n sections.append(input_shape[axis])\n split_sizes.append(output_shapes[-1][axis])\n\n sections.pop()\n for i in range(n_outputs):\n output_name = 'output_%s' % str(i)\n output_names.append(output_name)\n output_features.append(\n (output_name, None))\n\n input_features = [('data', datatypes.Array(*input_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_split_nd(name='split_nd', input_name='data',\n output_names=output_names, axis=axis,\n split_sizes=split_sizes)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = dict(\n zip(output_names, np.split(x, sections, axis=axis)))\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n for output_ in output_names:\n self.assertEqual(rank, builder._get_rank(output_))\n\n def test_split_nd_with_split_sizes_gpu(self):\n self.test_split_nd_with_split_sizes_cpu(cpu_only=False)\n\n def test_slice_static_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for _ in range(200):\n input_shape = np.array([5 for _ in range(rank)])\n objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []\n for dim in range(rank):\n stride = random.choice([-3, -1, 1, 2])\n begin_mask = random.choice([True, False])\n end_mask = random.choice([True, False])\n length = 0\n while length <= 0:\n begin_id = np.random.randint(low=-input_shape[dim],\n high=input_shape[dim])\n end_id = np.random.randint(low=-input_shape[dim],\n high=input_shape[dim])\n obj = slice(None if begin_mask else begin_id,\n None if end_mask else end_id, stride)\n length = np.arange(input_shape[dim])[(obj,)].shape[0]\n\n objs.append(obj), strides.append(stride), begin_masks.append(\n begin_mask)\n end_masks.append(end_mask), begin_ids.append(\n begin_id), end_ids.append(end_id)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_slice_static('slice_static', 'data', 'output',\n begin_ids=begin_ids, end_ids=end_ids, strides=strides,\n begin_masks=begin_masks, end_masks=end_masks)\n\n x = np.random.rand(*input_shape)\n inputs = {'data': x}\n expected = {'output': x[tuple(objs)]}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank, builder._get_rank('output'))\n\n def test_slice_static_gpu(self):\n self.test_slice_static_cpu(cpu_only=False)\n\n def test_slice_dynamic_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n input_shape = np.array([5 for _ in range(rank)])\n objs, strides, begin_masks, end_ids, end_masks, begin_ids = 
[], [], [], [], [], []\n for dim in range(rank):\n stride = random.choice([-3, -1, 1, 2])\n begin_mask = random.choice([True, False])\n end_mask = random.choice([True, False])\n length = 0\n while length <= 0:\n begin_id = np.random.randint(low=-input_shape[dim],\n high=input_shape[dim])\n end_id = np.random.randint(low=-input_shape[dim],\n high=input_shape[dim])\n obj = slice(None if begin_mask else begin_id,\n None if end_mask else end_id, stride)\n length = np.arange(input_shape[dim])[(obj,)].shape[0]\n\n objs.append(obj), strides.append(stride), begin_masks.append(\n begin_mask)\n end_masks.append(end_mask), begin_ids.append(\n begin_id), end_ids.append(end_id)\n\n # test different number of inputs, from 2 inputs up to 6 inputs\n # when num_inputs == 2, begin_ids are inputs, rest are read from parameters\n # when num_inputs == 6, all read from inputs, none are read from parameters\n for num_inputs in [2, 3, 4, 5, 6]:\n x = np.random.rand(*input_shape)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n input_names = ['data']\n inputs = dict()\n inputs['data'] = x\n\n if num_inputs == 2:\n input_features = [('data', datatypes.Array(*input_shape)),\n ('begin_ids', datatypes.Array(len(begin_ids)))]\n input_names = ['data', 'begin_ids']\n inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)\n elif num_inputs == 3:\n input_features = [('data', datatypes.Array(*input_shape)),\n ('begin_ids', datatypes.Array(len(begin_ids))),\n ('end_ids', datatypes.Array(len(end_ids)))]\n input_names = ['data', 'begin_ids', 'end_ids']\n inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)\n inputs['end_ids'] = np.array(end_ids, dtype=np.int32)\n elif num_inputs == 4:\n input_features = [('data', datatypes.Array(*input_shape)),\n ('begin_ids', datatypes.Array(len(begin_ids))),\n ('end_ids', datatypes.Array(len(end_ids))),\n ('strides', datatypes.Array(len(strides)))]\n input_names = ['data', 'begin_ids', 'end_ids', 'strides']\n inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)\n inputs['end_ids'] = np.array(end_ids, dtype=np.int32)\n inputs['strides'] = np.array(strides, dtype=np.int32)\n elif num_inputs == 5:\n input_features = [('data', datatypes.Array(*input_shape)),\n ('begin_ids', datatypes.Array(len(begin_ids))),\n ('end_ids', datatypes.Array(len(end_ids))),\n ('strides', datatypes.Array(len(strides))),\n ('begin_masks', datatypes.Array(len(begin_masks)))]\n input_names = ['data', 'begin_ids', 'end_ids', 'strides', 'begin_masks']\n inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)\n inputs['end_ids'] = np.array(end_ids, dtype=np.int32)\n inputs['strides'] = np.array(strides, dtype=np.int32)\n inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)\n elif num_inputs == 6:\n input_features = [('data', datatypes.Array(*input_shape)),\n ('begin_ids', datatypes.Array(len(begin_ids))),\n ('end_ids', datatypes.Array(len(end_ids))),\n ('strides', datatypes.Array(len(strides))),\n ('begin_masks', datatypes.Array(len(begin_masks))),\n ('end_masks', datatypes.Array(len(end_masks)))]\n input_names = ['data', 'begin_ids', 'end_ids',\n 'strides', 'begin_masks', 'end_masks']\n inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)\n inputs['end_ids'] = np.array(end_ids, dtype=np.int32)\n inputs['strides'] = np.array(strides, dtype=np.int32)\n inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)\n inputs['end_masks'] = np.array(end_masks, dtype=np.int32)\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n 
disable_rank5_shape_mapping=True)\n\n if num_inputs == 2:\n builder.add_slice_dynamic('slice_dynamic', input_names, 'output',\n end_ids=end_ids, strides=strides,\n begin_masks=begin_masks, end_masks=end_masks)\n elif num_inputs == 3:\n builder.add_slice_dynamic('slice_dynamic', input_names, 'output',\n strides=strides, begin_masks=begin_masks,\n end_masks=end_masks)\n elif num_inputs == 4:\n builder.add_slice_dynamic('slice_dynamic', input_names, 'output',\n begin_masks=begin_masks, end_masks=end_masks)\n elif num_inputs == 5:\n builder.add_slice_dynamic('slice_dynamic', input_names, 'output',\n end_masks=end_masks)\n elif num_inputs == 6:\n builder.add_slice_dynamic('slice_dynamic', input_names, 'output')\n\n expected = {'output': x[tuple(objs)]}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank, builder._get_rank('output'))\n\n def test_slice_dynamic_gpu(self):\n self.test_slice_dynamic_cpu(cpu_only=False)\n\n def test_tile_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=5, size=rank)\n for rep_rank in range(1,rank+1):\n reps = list(np.random.randint(low=1, high=9, size=rep_rank))\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_tile('Tile', 'data', 'output', reps)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.tile(x, reps)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_tile_gpu(self):\n self.test_tile_cpu(cpu_only=False)\n\n def test_sliding_windows_cpu(self, cpu_only=True):\n def numpy_sliding_windows(a, np_axis, np_size, np_step):\n n = (a.shape[np_axis] - np_size) // np_step + 1\n shape = list(a.shape)\n shape[np_axis] = n\n if np_axis < 0:\n np_axis += len(shape)\n shape.insert(np_axis + 1, np_size)\n strides = list(a.strides)\n effstride = strides[np_axis] * np_step\n strides.insert(np_axis, effstride)\n return np.lib.stride_tricks.as_strided(a, shape, strides)\n\n for rank in range(1, 5):\n for axis in range(-rank, rank):\n input_shape = np.random.randint(low=2, high=5, size=rank)\n output_shape = list(input_shape)\n window_size = np.random.randint(low=1, high=input_shape[axis])\n\n length = 0\n while length <= 0:\n step = np.random.randint(low=1, high=input_shape[axis])\n length = (input_shape[axis] - window_size) // step + 1\n\n output_shape[axis] = length\n\n pos_axis = axis if axis >= 0 else axis + rank\n output_shape.insert(pos_axis + 1, window_size)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_sliding_windows('sliding_windows',\n input_name='data',\n output_name='output',\n axis=axis,\n window_size=window_size,\n step=step)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': numpy_sliding_windows(x, axis, window_size, step)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank+1, builder._get_rank('output'))\n\n def test_sliding_windows_gpu(self):\n self.test_sliding_windows_cpu(cpu_only=False)\n\n def test_range_static_cpu(self, cpu_only=True):\n\n params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),\n (5, 8, 98), (5, 8, 
1.5), (10, 5, -0.6), (24, -65, -2)]\n\n for param in params:\n start, end, step = param\n input_features = [('multiplicative_input', datatypes.Array(1))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_range_static('range_static', 'output_range',\n end=end, start=start, step=step)\n builder.add_multiply_broadcastable(\n name='multiply_broadcastable',\n input_names=['multiplicative_input', 'output_range'],\n output_name='output')\n\n # save the model\n model_dir = tempfile.mkdtemp()\n model_path = os.path.join(model_dir, 'test_layer.mlmodel')\n coremltools.utils.save_spec(builder.spec, model_path)\n\n inputs = dict()\n inputs['multiplicative_input'] = np.ones((1,), dtype=np.float64)\n expected = {'output': np.arange(start, end, step)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(1, builder._get_rank('output'))\n\n def test_range_static_gpu(self):\n self.test_range_static_cpu(cpu_only=False)\n\n def test_range_dynamic_cpu(self, cpu_only=True):\n params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),\n (5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]\n\n # input size == 1: end is input, start and step are read from parameters\n # input size == 2: end, start are inputs, step is read from parameters\n # input size == 3: start, end, step are all inputs, none of the parameters are used.\n for num_inputs in [1, 2, 3]:\n for param in params:\n inputs = dict()\n start, end, step = param\n\n if num_inputs == 1:\n input_features = [('end', datatypes.Array(1))]\n elif num_inputs == 2:\n input_features = [('end', datatypes.Array(1)),\n ('start', datatypes.Array(1))]\n elif num_inputs == 3:\n input_features = [('end', datatypes.Array(1)),\n ('start', datatypes.Array(1)),\n ('step', datatypes.Array(1))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n if num_inputs == 1:\n inputs['end'] = end * np.ones((1,), dtype=np.float64)\n builder.add_range_dynamic('range_dynamic',\n output_name='output',\n input_names=['end'],\n start=start, step=step)\n elif num_inputs == 2:\n inputs['end'] = end * np.ones((1,), dtype=np.float64)\n inputs['start'] = start * np.ones((1,), dtype=np.float64)\n builder.add_range_dynamic('range_dynamic',\n output_name='output',\n input_names=['end', 'start'],\n step=step)\n elif num_inputs == 3:\n inputs['end'] = end * np.ones((1,), dtype=np.float64)\n inputs['start'] = start * np.ones((1,), dtype=np.float64)\n inputs['step'] = step * np.ones((1,), dtype=np.float64)\n builder.add_range_dynamic('range_dynamic',\n output_name='output',\n input_names=['end', 'start', 'step'])\n\n expected = {'output': np.arange(start, end, step)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(1, builder._get_rank('output'))\n\n def test_range_dynamic_gpu(self):\n self.test_range_dynamic_cpu(cpu_only=False)\n\n def test_linear_activation_different_ranks_cpu(self, cpu_only=True):\n for input_dim in [(10, 15), (10, 15, 2, 3),\n (10, 2, 4, 15, 1, 4), (6,)]:\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*input_dim))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_activation(name='activation',\n non_linearity='LINEAR',\n input_name='data',\n output_name='output', params=[34.0, 
67.0])\n\n x = np.random.rand(*input_dim)\n input = {'data': x}\n expected = {'output': 34.0 * x + 67.0}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_linear_activation_different_ranks_gpu(self):\n self.test_linear_activation_different_ranks_cpu(cpu_only=False)\n\n def test_topk_cpu(self, cpu_only=True):\n test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]\n K = [3, 5]\n axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]]\n\n for ii, input_shape in enumerate(test_input_shapes):\n for k in K:\n for n_inputs in [1, 2]:\n for bottom_k_flag in [False, True]:\n for axis in axes[ii]:\n for negative_axis in [False, True]:\n\n if negative_axis:\n axis = axis - len(input_shape)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('values', None), ('indices', None)]\n\n input_names = ['data']\n output_names = ['values', 'indices']\n\n if n_inputs == 2:\n input_names.append('k_in')\n input_features.append(('k_in', datatypes.Array(1)))\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n if n_inputs == 2:\n builder.add_topk('topk', input_names, output_names,\n axis=axis, use_bottom_k=bottom_k_flag)\n else:\n builder.add_topk('topk', input_names, output_names,\n k=k, axis=axis, use_bottom_k=bottom_k_flag)\n\n data = np.random.randint(low=0, high=int(np.prod(input_shape)), size=input_shape)\n data = data.astype(np.float32)\n\n input = {'data': data}\n if n_inputs == 2:\n input['k_in'] = k * np.ones([1], dtype=np.float32)\n\n # numpy reference values\n if bottom_k_flag:\n ref_indices = np.argsort(data, axis=axis)\n else:\n ref_indices = np.argsort(-data, axis=axis)\n\n slc = [slice(None)] * len(input_shape)\n slc[axis] = slice(0, k)\n ref_indices = ref_indices[tuple(slc)]\n ref_values = np.take_along_axis(data, ref_indices, axis=axis)\n expected = {'values': ref_values, 'indices': ref_indices}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_topk_gpu(self):\n self.test_topk_cpu(cpu_only=False)\n\n\n def test_const_pad_cpu(self, cpu_only=True):\n\n def get_reference(data, pads, value):\n with tf.Graph().as_default(), tf.Session() as sess:\n x = tf.placeholder(tf.float32, shape=data.shape)\n p = tf.placeholder(tf.int32, shape=pads.shape)\n y = tf.pad(x, p, mode='CONSTANT', constant_values=value)\n return sess.run(y, feed_dict={x: data, p: pads})\n\n value = 34.0\n shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]\n\n ctr = 0\n for shape in shapes:\n rank = len(shape)\n for force_zeros_in_end in [0, 2, 6]:\n for max_pad_value in range(1, 6):\n for n_inputs in [1, 2]:\n pads = np.random.randint(low=0, high=max_pad_value, size=(rank, 2))\n\n if force_zeros_in_end > 2 * rank:\n continue\n\n # pads = np.reshape(np.array([1,1,1,0,0,1]), (rank, 2))\n if force_zeros_in_end != 0:\n pads[-force_zeros_in_end:] = 0\n\n data = np.random.rand(*shape)\n reference = get_reference(data, pads, value)\n\n ctr += 1\n\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n input_names = ['data']\n if n_inputs == 2:\n input_names.append('pads')\n input_features.append(('pads', datatypes.Array(2*rank,)))\n\n builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)\n if n_inputs == 2:\n builder.add_constant_pad('pad', input_names, 'output', value=value)\n else:\n builder.add_constant_pad('pad', input_names, 
'output', value=value, pad_amounts=pads.flatten())\n\n input = {'data': data}\n if n_inputs == 2:\n input['pads'] = pads.flatten().astype(np.float)\n\n expected = {'output': reference}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n\n def test_const_pad_gpu(self):\n self.test_const_pad_cpu(cpu_only=False)\n\n\n def test_const_pad_mode2_cpu(self, cpu_only=True):\n\n def get_reference(data, output_shape, value, left_pad=False):\n with tf.Graph().as_default(), tf.Session() as sess:\n x = tf.placeholder(tf.float32, shape=data.shape)\n p = tf.placeholder(tf.int32, shape=(len(output_shape), 2))\n y = tf.pad(x, p, mode='CONSTANT', constant_values=value)\n pads = np.zeros((len(output_shape), 2))\n if left_pad:\n pads[:, 0] = np.array(output_shape) - np.array(data.shape)\n else:\n pads[:, 1] = np.array(output_shape) - np.array(data.shape)\n\n return sess.run(y, feed_dict={x: data, p: pads})\n\n value = 34.0\n shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]\n out_shapes = [(5,), (4, 8), (2, 4, 10), (20, 6, 7, 10, 7), (5, 24, 10, 4, 10)]\n\n ctr = 0\n for ii, shape in enumerate(shapes):\n rank = len(shape)\n for left_pad in [True, False]:\n for n_inputs in [1, 2]:\n\n data = np.random.rand(*shape)\n reference = get_reference(data, out_shapes[ii], value, left_pad)\n\n pads = np.zeros((rank, 2))\n tmp = np.zeros((rank))\n\n for i in range(rank):\n if out_shapes[ii][i] == shape[i]:\n tmp[i] = 0\n else:\n tmp[i] = out_shapes[ii][i]\n\n if left_pad:\n pads[:, 0] = tmp\n else:\n pads[:, 1] = tmp\n\n ctr += 1\n\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n input_names = ['data']\n if n_inputs == 2:\n input_names.append('pads')\n input_features.append(('pads', datatypes.Array(2*rank,)))\n\n builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)\n if n_inputs == 2:\n builder.add_constant_pad('pad', input_names, 'output', value=value, pad_to_given_output_size_mode=True)\n else:\n builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten(), pad_to_given_output_size_mode=True)\n\n input = {'data': data}\n if n_inputs == 2:\n input['pads'] = pads.flatten().astype(np.float)\n\n expected = {'output': reference}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n\n def test_const_pad_mode2_gpu(self):\n self.test_const_pad_mode2_cpu(cpu_only=False)\n\n\n def test_nms_cpu(self, cpu_only=True):\n def _compute_iou_matrix(boxes):\n # input is (N,4), in order [center_w, center_h, width, height]\n assert len(boxes.shape) == 2\n assert boxes.shape[1] == 4\n boxes = boxes.astype(np.float)\n center_w, center_h, width, height = np.split(boxes, 4, axis=1) # outs are all (N,1)\n top = center_h + 0.5 * height\n bottom = center_h - 0.5 * height\n left = center_w - 0.5 * width\n right = center_w + 0.5 * width\n area = width * height\n\n hB = np.minimum(top, np.transpose(top))\n wB = np.minimum(right, np.transpose(right))\n hA = np.maximum(bottom, np.transpose(bottom))\n wA = np.maximum(left, np.transpose(left))\n\n intersection_area = np.maximum(0, hB - hA) * np.maximum(0, wB - wA)\n union_area = area + np.transpose(area) - intersection_area\n iou = intersection_area / union_area\n return iou\n\n def _nms_TF(boxes, scores, iou_threshold, score_threshold, per_class_suppression, M):\n # boxes is (B,N,4), in order [center_w, center_h, width, height]\n # scores is (B,N,C)\n # output shapes: (B,M,4), (B,M,C), (B,M), (B,)\n 
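            # Descriptive note on the reference helper below (comments only, no behavior change):
            #   1. the [center_w, center_h, width, height] boxes are converted to TF corner
            #      format [y1, x1, y2, x2] before calling tf.image.non_max_suppression;
            #   2. suppression runs once over all boxes when per_class_suppression is False,
            #      otherwise it is repeated over the boxes of each predicted class;
            #   3. if nothing survives the thresholds, the single highest-scoring box is kept,
            #      and the surviving indices are truncated/padded so at most M boxes are emitted.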
'''\n this is implementation of CoreML's NMS layer\n '''\n B, N, C = scores.shape\n\n iou_threshold = iou_threshold.astype(np.float32)\n score_threshold = score_threshold.astype(np.float32)\n\n # convert box ids to TF style\n center_w, center_h, width, height = np.split(boxes, 4, axis=-1) # outs are all (B,N,1)\n y1 = center_h - 0.5 * height\n y2 = center_h + 0.5 * height\n x1 = center_w - 0.5 * width\n x2 = center_w + 0.5 * width\n boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (B,N,4)\n\n out1 = np.zeros((B, M, 4))\n out2 = np.zeros((B, M, C))\n out3 = -1 * np.ones((B, M))\n out4 = np.zeros((B,))\n\n for b in range(B):\n box_coord_matrix = boxes_tf[b, :, :] # (N,4)\n score_vector = np.max(scores[b, :, :], axis=-1) # (N,)\n if not per_class_suppression:\n # this is the simple case as TF directly supports it\n with tf.Graph().as_default(), tf.Session() as sess:\n box_coord_matrix_pl = tf.placeholder(tf.float32, shape=box_coord_matrix.shape)\n score_vector_pl = tf.placeholder(tf.float32, shape=score_vector.shape)\n ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,\n score_vector_pl,\n max_output_size=M, iou_threshold=iou_threshold,\n score_threshold=score_threshold)\n\n ids = sess.run(ids_g, feed_dict={box_coord_matrix_pl: box_coord_matrix, score_vector_pl: score_vector})\n else:\n # this is slightly complicated as TF does not directly support it\n class_ids = np.argmax(scores[b, :, :], axis=-1) # (N,)\n sorted_score_ids = np.argsort(-score_vector)\n box_coord_matrix2 = np.take(box_coord_matrix, sorted_score_ids, axis=0)\n score_vector2 = np.take(score_vector, sorted_score_ids)\n class_ids = np.take(class_ids, sorted_score_ids)\n classes_seen = dict()\n ids_intermediate = np.array([], dtype=np.int)\n for n in range(N):\n if class_ids[n] in classes_seen:\n continue\n c = class_ids[n]\n classes_seen[c] = True\n current_class_ids = np.where(class_ids == c)[0]\n if len(current_class_ids) > 0:\n feed_in1 = np.take(box_coord_matrix2, current_class_ids, axis=0)\n feed_in2 = np.take(score_vector2, current_class_ids)\n\n with tf.Graph().as_default(), tf.Session() as sess:\n box_coord_matrix_pl = tf.placeholder(tf.float32, shape=feed_in1.shape)\n score_vector_pl = tf.placeholder(tf.float32, shape=feed_in2.shape)\n cur_ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,\n score_vector_pl,\n max_output_size=M, iou_threshold=iou_threshold,\n score_threshold=score_threshold)\n cur_ids = sess.run(cur_ids_g, feed_dict={box_coord_matrix_pl: feed_in1,\n score_vector_pl: feed_in2})\n\n\n from_sort_ids = np.take(current_class_ids, cur_ids)\n ids_intermediate = np.append(ids_intermediate, from_sort_ids)\n ids_intermediate.sort()\n ids = np.take(sorted_score_ids, ids_intermediate)\n\n xx = len(ids)\n if xx == 0:\n ids = np.array([np.argmax(score_vector)])\n xx = 1\n if xx > M:\n ids = ids[:M]\n xx = len(ids)\n out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0)\n out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0)\n out3[b, :xx] = ids\n out4[b] = xx\n\n return out1, out2, out3, out4\n\n iou_threshold_percentile = [0, 30, 80, 100]\n score_threshold_percentile_arr = [0, 40, 100]\n N_M_pairs_to_test = [[100, 48], [100, 112]] # N : boxes in, M: max boxes out\n\n number_of_test = 0\n for N_M in N_M_pairs_to_test:\n for B in [1, 5]:\n for C in [1, 7]:\n N, M = N_M\n\n boxes = np.random.rand(B, N, 4)\n scores = np.random.rand(B, N, C)\n\n iou_matrix = _compute_iou_matrix(boxes[0, :, :]) # (N,N)\n iou_matrix = iou_matrix[~np.eye(iou_matrix.shape[0], dtype=bool)].reshape(iou_matrix.shape[0], 
-1)\n\n for per_class_suppression in [False, True]:\n for iou_thresh in iou_threshold_percentile:\n for score_thresh in score_threshold_percentile_arr:\n for is_dynamic in [False, True]:\n\n if score_thresh == 0:\n score_threshold = np.min(scores) - 1\n elif score_thresh == 100:\n score_threshold = np.max(scores) + 1\n else:\n score_threshold = np.percentile(scores, score_thresh) + .01\n\n if iou_thresh == 0:\n iou_threshold = np.maximum(np.min(iou_matrix) - .01, 0.0)\n else:\n iou_threshold = np.percentile(iou_matrix, iou_thresh) + .01\n\n number_of_test += 1\n\n tf_boxes, tf_scores, tf_ids, tf_num_boxes = _nms_TF(boxes, scores, iou_threshold,\n score_threshold,\n per_class_suppression,\n M)\n expected = dict()\n expected['selected_boxes'] = tf_boxes\n expected['selected_scores'] = tf_scores\n expected['selected_box_ids'] = tf_ids\n expected['number_of_boxes'] = tf_num_boxes\n\n # define CoreML model\n\n input_features = [('boxes', datatypes.Array(B,N,4)), ('scores', datatypes.Array(B,N,C))]\n output_features = [('selected_boxes', None), ('selected_scores', None),\n ('selected_box_ids', None), ('number_of_boxes', None)]\n\n input_names = ['boxes', 'scores']\n if is_dynamic:\n input_names.extend(['iou_threshold', 'score_threshold', 'max_boxes'])\n input_features.append(('iou_threshold', datatypes.Array(1, )))\n input_features.append(('score_threshold', datatypes.Array(1, )))\n input_features.append(('max_boxes', datatypes.Array(1, )))\n\n builder = neural_network.NeuralNetworkBuilder(input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n input_dict = dict()\n input_dict['boxes'] = boxes\n input_dict['scores'] = scores\n\n if is_dynamic:\n builder.add_nms('nms', input_names,\n ['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],\n per_class_suppression=per_class_suppression)\n\n input_dict['iou_threshold'] = iou_threshold * np.ones([1], dtype=np.float)\n input_dict['score_threshold'] = score_threshold * np.ones([1], dtype=np.float)\n input_dict['max_boxes'] = M * np.ones([1], dtype=np.float)\n else:\n builder.add_nms('nms', input_names,\n ['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],\n iou_threshold=iou_threshold, score_threshold=score_threshold,\n max_boxes=M, per_class_suppression=per_class_suppression)\n\n self._test_model(builder.spec, input_dict, expected, useCPUOnly=cpu_only)\n\n\n def test_nms_gpu(self):\n self.test_nms_cpu(cpu_only=False)\n\n def test_rank_preserving_reshape(self):\n input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)]\n target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)]\n output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)]\n\n for i in range(len(input_shapes)):\n input_features = [('data', datatypes.Array(*input_shapes[i]))]\n output_features = [('output', None)]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_rank_preserving_reshape(\n name='rank_preserving_reshape', input_name='data',\n output_name='output', output_shape=target_shapes[i])\n\n x = np.random.rand(*input_shapes[i])\n input = {'data': x}\n expected = {'output': np.reshape(x, output_shapes[i])}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))\n\n def test_expand_dims(self):\n input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)]\n axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)]\n output_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (1, 10, 1, 5), (10, 
5, 1, 1), (1, 1, 1, 10)]\n\n for i in range(len(input_shapes)):\n input_features = [('data', datatypes.Array(*input_shapes[i]))]\n output_features = [('output', None)]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_expand_dims(\n name='expand_dims', input_name='data', output_name='output',\n axes=axes[i]\n )\n\n x = np.random.rand(*input_shapes[i])\n input = {'data': x}\n expected = {'output': np.reshape(x, output_shapes[i])}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))\n\n def test_squeeze(self):\n input_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1),\n (10, 5, 1, 1), (1,), (10, 5, 1, 1), (3, 1, 7)]\n axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)]\n output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 5), (3, 7)]\n\n for i in range(len(input_shapes)):\n input_features = [('data', datatypes.Array(*input_shapes[i]))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n builder.add_squeeze(name='squeeze_layer', input_name='data',\n output_name='output', axes=list(axes[i]))\n\n x = np.random.rand(*input_shapes[i])\n input = {'data': x}\n expected = {'output': np.reshape(x, output_shapes[i])}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))\n\n def test_squeeze_all(self):\n input_shapes = [\n (1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1), (10, 5, 1, 1), (1,),\n (10, 5, 1, 1), (3, 1, 7), (3,), (5, 6)\n ]\n for input_shape in input_shapes:\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n builder.add_squeeze(name='squeeze_layer', input_name='data',\n output_name='output', squeeze_all=True)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n reference = np.squeeze(x)\n if not reference.shape:\n reference = np.reshape(reference, (1,))\n expected = {'output': reference}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n self.assertEqual(-1, builder._get_rank('output'))\n\n def test_argmax_argmin(self):\n test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]\n\n # (1+2+3+4+5) * 2^3 = 120 test cases\n for input_shape in test_input_shapes:\n for negative_axis in [False, True]:\n for mode in ['argmax', 'argmin']:\n for keep_dims in [True, False]:\n for axis in np.arange(len(input_shape)):\n\n if negative_axis:\n axis_val = axis - len(input_shape)\n else:\n axis_val = axis\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n x = np.random.rand(*input_shape)\n\n if mode == 'argmax':\n builder.add_argmax('argmax', 'data', 'output', axis=axis_val, keepdims=keep_dims)\n np_out = np.argmax(x, axis=axis_val)\n else:\n builder.add_argmin('argmin', 'data', 'output', axis=axis_val, keepdims=keep_dims)\n np_out = np.argmin(x, axis=axis_val)\n\n if keep_dims:\n np_out = np.expand_dims(np_out, axis=axis_val)\n elif len(input_shape) == 1:\n np_out = np.expand_dims(np_out, axis=axis_val)\n\n input = 
{'data': x}\n expected = {'output': np_out}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n self.assertEqual(len(np_out.shape), builder._get_rank('output'))\n\n def test_get_shape(self):\n dims = [1, 2, 3, 4, 5]\n for rank in range(1, len(dims) + 1):\n input_shape = dims[:rank]\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n builder.add_get_shape(name='get_shape_layer', input_name='data',\n output_name='output')\n\n feed = {'data': np.random.rand(*input_shape)}\n expected = {'output': np.array(input_shape)}\n\n self._test_model(builder.spec, feed, expected, useCPUOnly=True)\n self.assertEqual(1, builder._get_rank('output'))\n\n def test_load_constant_nd(self):\n dims = [2, 3, 4, 5, 6]\n for rank in range(1, len(dims) + 1):\n input_shape = dims[:rank]\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n builder.add_load_constant_nd('load_const_nd_layer', 'tmp',\n constant_value=np.ones(input_shape),\n shape=input_shape)\n builder.add_elementwise('add_layer', ['data', 'tmp'], 'output',\n mode='ADD')\n feed = {'data': np.random.rand(*input_shape)}\n expected = {'output': feed['data'] + 1}\n\n self._test_model(builder.spec, feed, expected, useCPUOnly=True)\n self.assertEqual(rank, builder._get_rank('output'))\n\n @unittest.skip('fix')\n def test_simple_array_alloc_scatter(self):\n alloc_shape = [2, 3, 4]\n value_shape = [1, 3, 4]\n input_features = [('alloc_shape', datatypes.Array(len(alloc_shape))),\n ('value', datatypes.Array(*value_shape)),\n ('index', datatypes.Array(1))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n builder.add_fill_dynamic(name='fill_dynamic_layer', input_name='alloc_shape',\n output_name='array', value=np.float(0.0))\n # CoreML input order: container (array), indices, slices (value)\n builder.add_scatter(name='scatter_layer',\n input_names=['array', 'index', 'value'],\n output_name='output')\n\n value = np.random.rand(*value_shape).astype('float')\n feed = {'alloc_shape': np.array(alloc_shape, dtype='float'),\n 'value': value,\n 'index': np.array([1], dtype='float')}\n\n ref = np.zeros(alloc_shape)\n ref[1, :, :] = value\n expected = {'output': ref}\n\n self._test_model(builder.spec, feed, expected, useCPUOnly=True)\n\n def test_erf_activation_cpu(self, cpu_only=True):\n input_features = [('data', datatypes.Array(10, 45))]\n output_features = [('output', datatypes.Array(10, 45))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n builder.add_erf(name='erf', input_name='data',\n output_name='output')\n x = np.random.rand(10, 45)\n input = {'data': x}\n expected = {\n 'output': np.asarray([math.erf(i) for i in\n x.flatten().tolist()]).reshape(10, 45)\n }\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_erf_activation_gpu(self):\n self.test_erf_activation_cpu(cpu_only=False)\n\n def test_gelu_activation(self):\n\n for mode in ['EXACT', 'TANH_APPROXIMATION', 'SIGMOID_APPROXIMATION']:\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=5, size=rank)\n input_features = 
[('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n builder.add_gelu(name='gelu', input_name='data',\n output_name='output', mode=mode)\n\n x = np.random.rand(*shape)\n input = {'data': x}\n exact = np.asarray([0.5 * i * (1.0 + math.erf(i / math.sqrt(2)))\n for i in x.flatten().tolist()]).reshape(*shape)\n\n expected = {'output': exact}\n self._test_model(builder.spec, input, expected, useCPUOnly=True)\n\n def test_lower_triangular_cpu(self, cpu_only=True):\n for rank in range(2, 6):\n for k in range(-3, 4):\n shape = np.random.randint(low=2, high=6, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n\n builder.add_lower_triangular('tril', 'data', 'output', k=k)\n\n x = np.random.rand(*shape)\n input = {'data': x}\n expected = {'output': np.tril(x, k=k)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_lower_triangular_gpu(self):\n self.test_lower_triangular_cpu(cpu_only=False)\n\n def test_upper_triangular_cpu(self, cpu_only=True):\n for rank in range(2, 6):\n for k in range(-3, 4):\n shape = np.random.randint(low=2, high=6, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n\n builder.add_upper_triangular('triu', 'data', 'output', k=k)\n\n x = np.random.rand(*shape)\n input = {'data': x}\n expected = {'output': np.triu(x, k=k)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_upper_triangular_gpu(self):\n self.test_upper_triangular_cpu(cpu_only=False)\n\n def test_where_broadcastable_cpu(self, cpu_only=True):\n for _ in range(150):\n rank_cond = np.random.randint(low=1, high=6)\n rank_true = np.random.randint(low=1, high=6)\n rank_false = np.random.randint(low=1, high=6)\n\n rank_out = max(rank_cond, rank_true, rank_false)\n\n shape_cond = np.random.randint(low=2, high=8, size=rank_cond)\n shape_true = np.random.randint(low=2, high=8, size=rank_true)\n shape_false = np.random.randint(low=2, high=8, size=rank_false)\n\n for i in range(-1, -rank_out - 1, -1):\n dims = []\n if -i <= rank_cond: dims.append(shape_cond[i])\n if -i <= rank_true: dims.append(shape_true[i])\n if -i <= rank_false: dims.append(shape_false[i])\n\n dim = np.random.choice(dims)\n if -i <= rank_cond: shape_cond[i] = np.random.choice([1, dim])\n if -i <= rank_true: shape_true[i] = np.random.choice([1, dim])\n if -i <= rank_false: shape_false[i] = np.random.choice([1, dim])\n\n input_features = [\n ('cond', datatypes.Array(*shape_cond)),\n ('true', datatypes.Array(*shape_true)),\n ('false', datatypes.Array(*shape_false))\n ]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n\n builder.add_where_broadcastable('if_broadcastable', input_names=['cond', 'true', 'false'],\n output_name='output')\n\n cond = np.random.choice([1.0, 0.0], size=shape_cond)\n true = np.random.rand(*shape_true)\n false = np.random.rand(*shape_false)\n\n input = {'cond': cond, 'true': true, 'false': false}\n expected = {'output': np.where(cond, true, false)}\n 
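            # np.where broadcasts cond/true/false to a common shape, so the expected output
            # rank equals max(rank_cond, rank_true, rank_false) (rank_out above); the
            # _get_rank assertion after _test_model relies on exactly that.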
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))\n\n def test_where_broadcastable_gpu(self):\n self.test_where_broadcastable_cpu(cpu_only=False)\n\n def test_random_normal_like_cpu(self, cpu_only=True):\n mean, stddev, seed = 0., 1., 42\n\n for rank in range(5, -1, -1):\n if rank > 0:\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)\n shape = np.random.randint(low=low, high=high, size=rank)\n else: # one extra test to test more moments\n shape = np.array([10, 10, 10, 10, 10000])\n\n input_features = [('tensor', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_normal_like(name='random_normal_like',\n input_name='tensor',\n output_name='output',\n mean=mean, stddev=stddev, seed=seed)\n\n inputs = {'tensor': np.random.rand(*shape)}\n expected = {'output': np.random.normal(mean, stddev, shape)}\n\n if rank > 0:\n CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n else: # one extra test to test more moments\n CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=6)\n\n def test_random_normal_like_gpu(self):\n self.test_random_normal_like_cpu(cpu_only=False)\n\n def test_random_normal_static_cpu(self, cpu_only=True):\n\n mean, stddev, seed = 0., 1., 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_normal_static(name='random_normal_static',\n output_name='tmp',\n output_shape=list(shape),\n mean=mean, stddev=stddev, seed=seed)\n\n builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')\n\n data = np.zeros(shape)\n inputs = {'data': data}\n expected = {'output': data + np.random.normal(mean, stddev, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank, builder._get_rank('output'))\n\n def test_random_normal_static_gpu(self):\n self.test_random_normal_static_cpu(cpu_only=False)\n\n def test_random_normal_dynamic_cpu(self, cpu_only=True):\n mean, stddev, seed = 0., 1., 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. 
/ rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('shape', datatypes.Array(len(shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_normal_dynamic(name='random_normal_dynamic',\n input_names=['shape'],\n output_name='output',\n mean=mean, stddev=stddev, seed=seed)\n\n inputs = {'shape': np.array(shape, np.float)}\n expected = {'output': np.random.normal(mean, stddev, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(-1, builder._get_rank('output'))\n\n def test_random_normal_dynamic_gpu(self):\n self.test_random_normal_dynamic_cpu(cpu_only=False)\n\n def test_random_uniform_like_cpu(self, cpu_only=True):\n minval, maxval, seed = 0., 1., 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('tensor', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_uniform_like(name='random_uniform_like',\n input_name='tensor',\n output_name='output',\n minval=minval, maxval=maxval, seed=seed)\n\n tensor = np.random.rand(*shape)\n inputs = {'tensor': tensor}\n expected = {'output': np.random.uniform(minval, maxval, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank, builder._get_rank('output'))\n\n def test_random_uniform_like_gpu(self):\n self.test_random_uniform_like_cpu(cpu_only=False)\n\n def test_random_uniform_static_cpu(self, cpu_only=True):\n minval, maxval, seed = 0., 1., 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_uniform_static(name='random_uniform_static',\n output_name='tmp',\n output_shape=list(shape),\n minval=minval, maxval=maxval, seed=seed)\n\n builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')\n\n data = np.zeros(shape)\n inputs = {'data': data}\n expected = {'output': data + np.random.uniform(minval, maxval, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(rank, builder._get_rank('output'))\n\n def test_random_uniform_static_gpu(self):\n self.test_random_uniform_static_cpu(cpu_only=False)\n\n def test_random_uniform_dynamic_cpu(self, cpu_only=True):\n minval, maxval, seed = 0., 1., 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. 
/ rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('shape', datatypes.Array(len(shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_uniform_dynamic(name='random_uniform_dynamic',\n input_names=['shape'],\n output_name='output',\n minval=minval, maxval=maxval, seed=seed)\n\n inputs = {'shape': np.array(shape, np.float)}\n expected = {'output': np.random.uniform(minval, maxval, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(-1, builder._get_rank('output'))\n\n def test_random_uniform_dynamic_gpu(self):\n self.test_random_uniform_dynamic_cpu(cpu_only=False)\n\n def test_random_bernoulli_like_cpu(self, cpu_only=True):\n\n prob, seed = 0.5, 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('tensor', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_bernoulli_like(name='random_bernoulli_like',\n input_name='tensor',\n output_name='output',\n prob=prob, seed=seed)\n\n tensor = np.random.rand(*shape)\n inputs = {'tensor': tensor}\n expected = {'output': np.random.binomial(1, prob, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_random_bernoulli_like_gpu(self):\n self.test_random_bernoulli_like_cpu(cpu_only=False)\n\n def test_random_bernoulli_static_cpu(self, cpu_only=True):\n prob, seed = 0.5, 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_bernoulli_static(name='random_bernoulli_static', output_name='tmp',\n output_shape=list(shape), prob=prob, seed=seed)\n\n builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')\n\n data = np.zeros(shape)\n inputs = {'data': data}\n expected = {'output': data + np.random.binomial(1, prob, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_random_bernoulli_static_gpu(self):\n self.test_random_bernoulli_static_cpu(cpu_only=False)\n\n def test_random_bernoulli_dynamic_cpu(self, cpu_only=True):\n prob, seed = 0.5, 42\n\n for rank in range(1, 6):\n low_factor = np.random.randint(low=2, high=4)\n low = int(np.power(1000, 1. / rank)) * low_factor\n high = int(np.power(2000, 1. 
/ rank)) * np.random.randint(low=low_factor, high=4)\n\n shape = np.random.randint(low=low, high=high, size=rank)\n\n input_features = [('shape', datatypes.Array(len(shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_random_bernoulli_dynamic(name='random_bernoulli_dynamic',\n input_names=['shape'],\n output_name='output',\n prob=prob, seed=seed)\n\n inputs = {'shape': np.array(shape, np.float)}\n expected = {'output': np.random.binomial(1, prob, shape)}\n\n CorrectnessTest._compare_moments(builder.spec, inputs, expected)\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_random_bernoulli_dynamic_gpu(self):\n self.test_random_bernoulli_dynamic_cpu(cpu_only=False)\n\n def test_categorical_distribution_cpu_shapes(self):\n\n for rank in range(1, 6):\n shape = np.random.randint(low=2, high=8, size=rank)\n num_samples = np.random.randint(low=10, high=1000)\n\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_categorical_distribution(name='categorical_distribution',\n input_name='data',\n output_name='output',\n num_samples=num_samples)\n\n x = np.random.randint(low=0, high=20, size=shape).astype(np.float32)\n inputs = {'data': x}\n shape[-1] = num_samples\n expected = {'output': np.random.rand(*shape)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=True, validate_shapes_only=True)\n\n def test_categorical_distribution_cpu_logits(self):\n\n def softmax(data):\n e_data = np.exp(data - np.max(data))\n return e_data / e_data.sum()\n\n num_samples, num_class = 50000, 10\n input_name, output_name = 'data', 'output'\n\n shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),\n (2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),\n (2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),\n (1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]\n\n for shape in shapes:\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_categorical_distribution(name='categorical_distribution',\n input_name=input_name,\n output_name=output_name,\n num_samples=num_samples,\n is_logits=True,\n seed=42)\n\n x = np.random.rand(*shape)\n inputs = {input_name: x}\n\n model = builder.spec\n if isinstance(model, str):\n model = coremltools.models.MLModel(model)\n\n model = coremltools.models.MLModel(model, useCPUOnly=True)\n prediction = model.predict(inputs, useCPUOnly=True)\n\n # validate each distribution separately\n logits = x.reshape(2, num_class)\n probs = [softmax(logits[0]), softmax(logits[1])]\n\n ref0 = np.random.multinomial(num_samples, probs[0])\n ref1 = np.random.multinomial(num_samples, probs[1])\n\n pre0 = prediction[output_name].reshape(2, num_samples)[0]\n pre1 = prediction[output_name].reshape(2, num_samples)[1]\n\n expected = {output_name: np.stack((pre0, pre1))}\n\n # convert to bincount and validate probabilities\n pre0 = np.bincount(np.array(pre0).astype(np.int), minlength=num_class)\n pre1 = np.bincount(np.array(pre1).astype(np.int), minlength=num_class)\n\n assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)\n assert np.allclose(np.true_divide(pre0, num_samples),\n np.true_divide(ref0, num_samples), atol=1e-2)\n\n assert 
np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)\n assert np.allclose(np.true_divide(pre1, num_samples),\n np.true_divide(ref1, num_samples), atol=1e-2)\n\n self._test_model(model, inputs, expected, useCPUOnly=True,\n output_name_shape_dict={'output': prediction['output'].shape})\n\n def test_categorical_distribution_cpu_probs(self):\n\n def softmax(data):\n e_data = np.exp(data - np.max(data))\n return e_data / e_data.sum()\n\n num_samples, num_class = 50000, 10\n input_name, output_name = 'data', 'output'\n\n shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),\n (2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),\n (2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),\n (1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]\n\n for shape in shapes:\n input_features = [('data', datatypes.Array(*shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)], disable_rank5_shape_mapping=True)\n\n builder.add_categorical_distribution(name='categorical_distribution',\n input_name=input_name,\n output_name=output_name,\n num_samples=num_samples,\n is_logits=False,\n seed=42)\n\n x = np.random.rand(*shape)\n probs = x.reshape(2, num_class)\n probs[0], probs[1] = softmax(probs[0]), softmax(probs[1])\n inputs = {input_name: np.reshape(probs, shape)}\n\n model = builder.spec\n if isinstance(model, str):\n model = coremltools.models.MLModel(model)\n\n model = coremltools.models.MLModel(model, useCPUOnly=True)\n prediction = model.predict(inputs, useCPUOnly=True)\n\n # validate each distribution separately\n probs = probs.reshape(2, num_class)\n\n ref0 = np.random.multinomial(num_samples, probs[0])\n ref1 = np.random.multinomial(num_samples, probs[1])\n\n pre0 = prediction[output_name].reshape(2, num_samples)[0]\n pre1 = prediction[output_name].reshape(2, num_samples)[1]\n\n expected = {output_name: np.stack((pre0, pre1))}\n\n # convert to bincount and validate probabilities\n pre0 = np.bincount(np.array(pre0).astype(np.int), minlength=num_class)\n pre1 = np.bincount(np.array(pre1).astype(np.int), minlength=num_class)\n\n assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)\n assert np.allclose(np.true_divide(pre0, num_samples),\n np.true_divide(ref0, num_samples), atol=1e-2)\n\n assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)\n assert np.allclose(np.true_divide(pre1, num_samples),\n np.true_divide(ref1, num_samples), atol=1e-2)\n\n self._test_model(model, inputs, expected, useCPUOnly=True,\n output_name_shape_dict={'output': prediction['output'].shape})\n\n def test_reverse_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n for _ in range(20):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n reverse_dim = [np.random.choice([True, False]) for _ in range(rank)]\n axes = [i for i in range(rank) if reverse_dim[i] == True]\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_reverse('reverse', 'data', 'output', reverse_dim)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.flip(x, axis=axes)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reverse_gpu(self):\n self.test_reverse_cpu(cpu_only=False)\n\n def test_matrix_band_part_cpu(self, cpu_only=True):\n\n for rank in range(2, 6):\n for _ in range(20):\n 
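                # Band-part semantics, mirroring the reference mask built below: element (m, n)
                # is kept iff (num_lower < 0 or m - n <= num_lower) and (num_upper < 0 or n - m <= num_upper).
                # A negative bound keeps the whole triangle on that side; e.g. num_lower=0 with
                # num_upper=-1 keeps the upper triangle including the diagonal.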
num_lower = np.random.randint(low=-7, high=8)\n num_upper = np.random.randint(low=-7, high=8)\n shape = np.random.randint(low=2, high=6, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n\n builder.add_matrix_band_part('matrix_band_part', 'data', 'output',\n num_lower=num_lower, num_upper=num_upper)\n\n x = np.random.rand(*shape)\n input = {'data': x}\n\n rows, cols = shape[-2:]\n band = np.ones((rows, cols))\n for m in range(rows):\n for n in range(cols):\n band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and (num_upper < 0 or (n - m) <= num_upper)\n\n expected = {'output': np.multiply(band, x)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_matrix_band_part_gpu(self):\n self.test_matrix_band_part_cpu(cpu_only=False)\n\n def test_flatten_to_2d_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n for axis in range(-rank, rank + 1):\n shape = np.random.randint(low=2, high=6, size=rank)\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features, disable_rank5_shape_mapping=True)\n\n builder.add_flatten_to_2d('flatten_to_2d', 'data', 'output', axis=axis)\n\n x = np.random.rand(*shape)\n np_axis = axis + rank if axis < 0 else axis\n pl, pr = 1, 1\n for i in range(0, np_axis):\n pl *= shape[i]\n for i in range(np_axis, len(shape)):\n pr *= shape[i]\n\n new_shape = [pl, pr]\n ref = x.reshape(new_shape)\n\n input = {'data': x}\n expected = {'output': ref}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(2, builder._get_rank('output'))\n\n def test_flatten_to_2d_gpu(self):\n self.test_flatten_to_2d_cpu(cpu_only=False)\n\n def test_reshape_like_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n for _ in range(20):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n n = int(np.prod(input_shape))\n divisors = [d for d in range(1, n) if n % d == 0]\n target_rank = np.random.randint(low=2, high=6)\n target_shape = [1]\n for i in range(target_rank - 1):\n dim_size = np.random.choice(divisors)\n while n % (np.prod(target_shape) * dim_size) != 0:\n dim_size = np.random.choice(divisors)\n target_shape.append(dim_size)\n target_shape[0] = n // np.prod(target_shape)\n\n np.random.shuffle(target_shape)\n input_features = [('data', datatypes.Array(*input_shape)),\n ('tensor', datatypes.Array(*target_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_reshape_like(name='reshape_like',\n input_names=['data', 'tensor'],\n output_name='output')\n\n data = np.random.rand(*input_shape)\n tensor = np.random.rand(*target_shape)\n inputs = {'data': data, 'tensor': tensor}\n expected = {'output': np.reshape(data, target_shape)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(target_rank, builder._get_rank('output'))\n\n def test_reshape_like_gpu(self):\n self.test_reshape_like_cpu(cpu_only=False)\n\n def test_reshape_static_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for _ in range(20):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n n = int(np.prod(input_shape))\n divisors = [d for d in range(1, n) if n % d == 0]\n target_rank = np.random.randint(low=2, 
high=6)\n\n target_shape = [1]\n for i in range(target_rank - 1):\n dim_size = np.random.choice(divisors)\n while n % (np.prod(target_shape) * dim_size) != 0:\n dim_size = np.random.choice(divisors)\n target_shape.append(dim_size)\n\n target_shape[0] = -1\n\n np.random.shuffle(target_shape)\n input_features = [('data', datatypes.Array(*input_shape))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_reshape_static(name='reshape_static',\n input_name='data',\n output_name='output',\n output_shape=target_shape)\n\n data = np.random.rand(*input_shape)\n inputs = {'data': data}\n expected = {'output': np.reshape(data, target_shape)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(len(target_shape), builder._get_rank('output'))\n\n def test_reshape_static_gpu(self):\n self.test_reshape_static_cpu(cpu_only=False)\n\n def test_reshape_dynamic_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for _ in range(20):\n input_shape = np.random.randint(low=2, high=8, size=rank)\n n = int(np.prod(input_shape))\n divisors = [d for d in range(1, n) if n % d == 0]\n target_rank = np.random.randint(low=2, high=6)\n\n target_shape = [1]\n for i in range(target_rank - 1):\n dim_size = np.random.choice(divisors)\n while n % (np.prod(target_shape) * dim_size) != 0:\n dim_size = np.random.choice(divisors)\n target_shape.append(dim_size)\n\n target_shape[0] = -1\n\n np.random.shuffle(target_shape)\n input_features = [('data', datatypes.Array(*input_shape)),\n ('shape', datatypes.Array(len(target_shape)))]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, [('output', None)],\n disable_rank5_shape_mapping=True)\n\n builder.add_reshape_dynamic(name='reshape_dynamic',\n input_names=['data', 'shape'],\n output_name='output')\n\n data = np.random.rand(*input_shape)\n inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}\n expected = {'output': np.reshape(data, target_shape)}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n self.assertEqual(-1, builder._get_rank('output'))\n\n def test_reshape_dynamic_gpu(self):\n self.test_reshape_dynamic_cpu(cpu_only=False)\n\n def test_reduce_sum_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_sum('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.add.reduce(x, axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n expected_rank = len(expected['output'].shape)\n if expected_rank == 0:\n expected_rank = 1\n self.assertEqual(expected_rank, builder._get_rank('output'))\n \n def test_reduce_sum_gpu(self):\n self.test_reduce_sum_cpu(cpu_only=False)\n\n def 
test_reduce_prod_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_prod('reduce', 'data', 'output', axes, keepdims=keep_dims,\n reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.multiply.reduce(x, axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n expected_rank = len(expected['output'].shape)\n if expected_rank == 0:\n expected_rank = 1\n self.assertEqual(expected_rank, builder._get_rank('output'))\n\n def test_reduce_prod_gpu(self):\n self.test_reduce_prod_cpu(cpu_only=False)\n\n def test_reduce_mean_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_mean('reduce', 'data', 'output', axes, keepdims=keep_dims,\n reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.mean(x, axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_mean_gpu(self):\n self.test_reduce_mean_cpu(cpu_only=False)\n\n def test_reduce_max_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_max('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.maximum.reduce(x, axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_max_gpu(self):\n self.test_reduce_max_cpu(cpu_only=False)\n\n def test_reduce_min_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for 
length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_min('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.minimum.reduce(x, axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_min_gpu(self):\n self.test_reduce_min_cpu(cpu_only=False)\n\n def test_reduce_l2_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_l2('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.sqrt(np.sum(np.square(x), axis=axes, keepdims=keep_dims))}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_l2_gpu(self):\n self.test_reduce_l2_cpu(cpu_only=False)\n\n def test_reduce_l1_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_l1('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.sum(np.abs(x), axis=axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_l1_gpu(self):\n self.test_reduce_l1_cpu(cpu_only=False)\n\n def test_reduce_sumsquare_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in 
axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_sumsquare('reduce', 'data', 'output', axes, keepdims=keep_dims,\n reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.sum(np.square(x), axis=axes, keepdims=keep_dims)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_sumsquare_gpu(self):\n self.test_reduce_sumsquare_cpu(cpu_only=False)\n\n def test_reduce_logsum_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_logsum('reduce', 'data', 'output', axes, keepdims=keep_dims,\n reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.log(np.sum(x, axis=axes, keepdims=keep_dims))}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_logsum_gpu(self):\n self.test_reduce_logsum_cpu(cpu_only=False)\n\n def test_reduce_logsumexp_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]\n axes_list.append(None)\n\n for axes in axes_list:\n if axes:\n axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])\n reduce_all = False\n else:\n reduce_all = True\n\n for keep_dims in [True, False]:\n input_shape = np.random.randint(low=2, high=5, size=rank)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reduce_logsumexp('reduce', 'data', 'output', axes, keepdims=keep_dims,\n reduce_all=reduce_all)\n\n x = np.random.rand(*input_shape)\n input = {'data': x}\n expected = {'output': np.log(np.sum(np.exp(x), axis=axes, keepdims=keep_dims))}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reduce_logsumexp_gpu(self):\n self.test_reduce_logsumexp_cpu(cpu_only=False)\n\n def test_reverse_sequence_cpu(self, cpu_only=True):\n for rank in range(2, 6):\n for i in range(20):\n input_shape = np.random.randint(low=2, high=6, size=rank)\n\n seq_axis = np.random.randint(low=-rank, high=rank)\n batch_axis = np.random.randint(low=-rank, high=rank)\n pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis\n pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis\n while pos_batch_axis >= pos_seq_axis:\n seq_axis = np.random.randint(low=-rank, high=rank)\n 
batch_axis = np.random.randint(low=-rank, high=rank)\n pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis\n pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis\n\n input_features = [('data', datatypes.Array(*input_shape)),\n ('lengths', datatypes.Array(input_shape[batch_axis]))]\n\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_reverse_sequence('reverse_sequence', ['data', 'lengths'],\n 'output', batch_axis=batch_axis,\n seq_axis=seq_axis)\n\n data = np.random.rand(*input_shape)\n lengths = np.random.randint(low=0, high=input_shape[seq_axis], size=input_shape[batch_axis])\n\n input = {'data': data, 'lengths': lengths.astype(np.float32)}\n\n with tf.Graph().as_default(), tf.Session() as sess:\n tf_op = tf.reverse_sequence(input=data, seq_lengths=lengths,\n seq_axis=pos_seq_axis, batch_axis=pos_batch_axis)\n expected = {'output': sess.run(tf_op)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_reverse_sequence_gpu(self):\n self.test_reverse_sequence_cpu(cpu_only=False)\n\n def test_where_nonzero_cpu(self, cpu_only=True):\n\n for rank in range(1, 6):\n for i in range(10):\n shape = np.random.randint(low=2, high=8, size=rank)\n\n input_features = [('data', datatypes.Array(*shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_where_nonzero('multi_indices', 'data', 'output')\n\n x = np.random.randint(low=0, high=3, size=shape)\n\n input = {'data': x.astype(np.float)}\n expected = {'output': np.transpose(np.nonzero(x)).astype(np.float)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_where_nonzero_gpu(self):\n self.test_where_nonzero_cpu(cpu_only=False)\n\n def test_gather_cpu(self, cpu_only=True):\n for rankParams, rankIndices in [(i, j) for i in range(1, 6) for j in range(1, 6)]:\n for axis in range(-rankParams, rankParams):\n shapeParams = np.random.randint(low=2, high=5, size=rankParams)\n shapeIndices = np.random.randint(low=2, high=5,\n size=rankIndices)\n input_shapes = [shapeParams, shapeIndices]\n posAxis = axis if axis >= 0 else axis + rankParams\n output_shape = list(shapeParams[:posAxis]) + list(\n shapeIndices) + list(shapeParams[posAxis + 1:])\n\n if len(output_shape) > 5:\n continue\n\n input_names = ['params', 'indices']\n input_features = [\n ('params', datatypes.Array(*input_shapes[0])),\n ('indices', datatypes.Array(*input_shapes[1]))\n ]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_gather(name='gather', input_names=input_names,\n output_name='output', axis=axis)\n\n a = np.random.rand(*input_shapes[0])\n b = np.random.randint(-shapeParams[axis], shapeParams[axis],\n size=shapeIndices)\n input = {'params': a, 'indices': b.astype(np.float)}\n expected = {'output': np.take(a, b, axis=axis)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))\n\n def test_gather_gpu(self):\n self.test_gather_cpu(cpu_only=False)\n\n def test_gather_along_axis_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for axis in range(-rank, rank):\n for _ in range(5):\n params_shape = np.random.randint(low=2, 
high=8, size=rank)\n indices_shape = np.copy(params_shape)\n indices_shape[axis] = np.random.randint(low=1, high=8)\n\n input_features = [('params', datatypes.Array(*params_shape)),\n ('indices', datatypes.Array(*indices_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n builder.add_gather_along_axis('gather_along_axis', ['params', 'indices'], 'output', axis=axis)\n\n a = np.random.rand(*params_shape)\n b = np.random.randint(-params_shape[axis], params_shape[axis], size=indices_shape)\n\n input = {'params': a, 'indices': b.astype(np.float)}\n expected = {'output': np.take_along_axis(a, b, axis=axis)}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))\n\n def test_gather_along_axis_gpu(self):\n self.test_gather_along_axis_cpu(cpu_only=False)\n\n def test_gather_nd_cpu(self, cpu_only=True):\n for params_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:\n params_shape = np.random.randint(low=2, high=8, size=params_rank)\n indices_shape = np.random.randint(low=2, high=8, size=indices_rank)\n indices_shape[-1] = np.random.randint(low=1, high=params_rank + 1)\n\n for _ in range(5):\n input_features = [('params', datatypes.Array(*params_shape)),\n ('indices', datatypes.Array(*indices_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n output_shape = list(indices_shape[:-1]) + list(params_shape[indices_shape[-1]:])\n if len(output_shape) > 5:\n continue\n\n builder.add_gather_nd('gather_nd', ['params', 'indices'], 'output')\n\n a = np.random.rand(*params_shape)\n indices_list = []\n for i in range(indices_shape[-1]):\n indices_list.append(np.random.randint(0, params_shape[i], size=indices_shape[:-1]))\n\n indices = np.stack(indices_list, axis=-1)\n input = {'params': a, 'indices': indices.astype(np.float)}\n\n with tf.Graph().as_default(), tf.Session() as sess:\n tf_op = tf.gather_nd(a, indices)\n expected = {'output': sess.run(tf_op)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n self.assertEqual(-1, builder._get_rank('output'))\n\n def test_gather_nd_gpu(self):\n self.test_gather_nd_cpu(cpu_only=False)\n\n def test_scatter_cpu(self, cpu_only=True):\n for ref_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:\n for accumulate_mode in [\"UPDATE\", \"ADD\", \"SUB\", \"MUL\", \"DIV\", \"MAX\", \"MIN\"]:\n for _ in range(5):\n ref_shape = np.random.randint(low=2, high=8, size=ref_rank)\n indices_shape = np.random.randint(low=2, high=8, size=indices_rank)\n updates_shape = list(indices_shape) + list(ref_shape[1:])\n\n input_features = [('ref', datatypes.Array(*ref_shape)),\n ('indices', datatypes.Array(*indices_shape)),\n ('updates', datatypes.Array(*updates_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n if len(updates_shape) > 5:\n continue\n\n builder.add_scatter('scatter', ['ref', 'indices', 'updates'], 'output', axis=0,\n mode=accumulate_mode)\n\n ref = np.random.rand(*ref_shape)\n updates = np.random.rand(*updates_shape)\n indices = np.random.randint(0, ref_shape[0], size=indices_shape)\n input = {'ref': ref, 'indices': indices.astype(np.float), 
'updates': updates}\n\n with tf.Graph().as_default(), tf.Session() as sess:\n tf_output = tf.Variable(ref)\n sess.run(tf.global_variables_initializer())\n if accumulate_mode == \"UPDATE\":\n sess.run(tf.scatter_update(tf_output, indices, updates))\n if accumulate_mode == \"ADD\":\n sess.run(tf.scatter_add(tf_output, indices, updates))\n if accumulate_mode == \"SUB\":\n sess.run(tf.scatter_sub(tf_output, indices, updates))\n if accumulate_mode == \"MUL\":\n sess.run(tf.scatter_mul(tf_output, indices, updates))\n if accumulate_mode == \"DIV\":\n sess.run(tf.scatter_div(tf_output, indices, updates))\n if accumulate_mode == \"MAX\":\n sess.run(tf.scatter_max(tf_output, indices, updates))\n if accumulate_mode == \"MIN\":\n sess.run(tf.scatter_min(tf_output, indices, updates))\n expected = {'output': sess.run(tf_output)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_scatter_gpu(self):\n self.test_scatter_cpu(cpu_only=False)\n\n def test_gather_scatter_multiple_axis_cpu(self, cpu_only=True):\n\n for params_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:\n for axis in range(-params_rank, params_rank):\n for _ in range(5):\n params_shape = np.random.randint(low=2, high=8, size=params_rank)\n indices_shape = np.random.randint(low=2, high=8, size=indices_rank)\n\n pos_axis = axis if axis >= 0 else axis + params_rank\n output_shape = list(params_shape[:pos_axis]) + list(indices_shape) + list(\n params_shape[pos_axis + 1:])\n\n if len(output_shape) > 5:\n continue\n\n input_features = [('params', datatypes.Array(*params_shape)),\n ('indices', datatypes.Array(*indices_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_gather('gather', ['params', 'indices'], 'updates', axis=axis)\n builder.add_scatter('scatter', ['params', 'indices', 'updates'], 'output', axis=axis, mode='UPDATE')\n\n a = np.random.rand(*params_shape)\n b = np.random.randint(-params_shape[axis], params_shape[axis], size=indices_shape)\n\n input = {'params': a, 'indices': b.astype(np.float)}\n expected = {'output': a}\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_gather_scatter_multiple_axis_gpu(self):\n self.test_gather_scatter_multiple_axis_cpu(cpu_only=False)\n\n def test_scatter_along_axis_cpu(self, cpu_only=True):\n for rank in range(1, 6):\n for axis in range(-rank, rank):\n for id in range(5):\n ref_shape = np.random.randint(low=2, high=8, size=rank)\n indices_shape = np.copy(ref_shape)\n indices_shape[axis] = np.random.randint(low=1, high=8)\n updates_shape = indices_shape\n\n input_features = [('ref', datatypes.Array(*ref_shape)),\n ('indices', datatypes.Array(*indices_shape)),\n ('updates', datatypes.Array(*updates_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_scatter_along_axis('scatter_along_axis', ['ref', 'indices', 'updates'], 'output',\n axis=axis, mode=\"UPDATE\")\n\n ref = np.random.rand(*ref_shape)\n updates = np.random.rand(*updates_shape)\n indices = np.random.randint(-ref_shape[axis], ref_shape[axis], size=indices_shape)\n input = {'ref': ref, 'indices': indices.astype(np.float), 'updates': updates}\n\n np_output = np.copy(ref)\n np.put_along_axis(np_output, indices, updates, axis=axis)\n expected = {'output': np_output}\n 
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_scatter_along_axis_gpu(self):\n self.test_scatter_along_axis_cpu(cpu_only=False)\n\n def test_scatter_nd_cpu(self, cpu_only=True):\n for ref_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(2, 6)]:\n ref_shape = np.random.randint(low=2, high=8, size=ref_rank)\n indices_shape = np.random.randint(low=2, high=8, size=indices_rank)\n indices_shape[-1] = np.random.randint(low=1, high=ref_rank + 1)\n for accumulate_mode in [\"UPDATE\", \"ADD\", \"SUB\"]:\n for id in range(20):\n updates_shape = list(indices_shape[:-1]) + list(ref_shape[indices_shape[-1]:])\n if len(updates_shape) > 5: continue\n\n input_features = [('ref', datatypes.Array(*ref_shape)),\n ('indices', datatypes.Array(*indices_shape)),\n ('updates', datatypes.Array(*updates_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_scatter_nd('scatter_nd', ['ref', 'indices', 'updates'], 'output', mode=accumulate_mode)\n\n ref = np.random.rand(*ref_shape)\n updates = np.random.rand(*updates_shape)\n indices_list = []\n for i in range(indices_shape[-1]):\n indices_list.append(np.random.randint(0, ref_shape[i], size=indices_shape[:-1]))\n\n indices = np.stack(indices_list, axis=-1)\n\n input = {'ref': ref, 'indices': indices.astype(np.float), 'updates': updates}\n\n with tf.Graph().as_default(), tf.Session() as sess:\n tf_output = tf.Variable(ref)\n sess.run(tf.global_variables_initializer())\n if accumulate_mode == \"UPDATE\":\n sess.run(tf.scatter_nd_update(tf_output, indices, updates))\n if accumulate_mode == \"ADD\":\n sess.run(tf.scatter_nd_add(tf_output, indices, updates))\n if accumulate_mode == \"SUB\":\n sess.run(tf.scatter_nd_sub(tf_output, indices, updates))\n expected = {'output': sess.run(tf_output)}\n\n self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)\n\n def test_scatter_nd_gpu(self):\n self.test_scatter_nd_cpu(cpu_only=False)\n\n def test_layer_normalization_cpu(self, cpu_only=True):\n def layer_norm_numpy(x, shapes, gamma_, beta_, eps=1e-5):\n axes = [-i - 1 for i, _ in enumerate(shapes)]\n num = x - np.mean(x, axis=tuple(axes), keepdims=True)\n dem = np.sqrt(np.sum(np.square(num), axis=tuple(axes),\n keepdims=True) / np.prod(shapes) + eps)\n return num / dem * gamma_ + beta_\n\n for rank in range(1, 6):\n input_shape = np.random.randint(low=2, high=6, size=rank)\n for axis in range(1, len(input_shape) + 1):\n norm_shapes = input_shape[-axis:]\n\n data = np.random.rand(*input_shape)\n\n gamma = np.random.rand(*norm_shapes)\n beta = np.random.rand(*norm_shapes)\n\n input_features = [('data', datatypes.Array(*input_shape))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features, output_features,\n disable_rank5_shape_mapping=True\n )\n\n builder.add_layer_normalization(name='layer_normalization',\n input_name='data',\n output_name='output',\n normalized_shape=norm_shapes,\n gamma=gamma, beta=beta)\n\n inputs = {'data': data}\n ref = layer_norm_numpy(data, norm_shapes, gamma, beta)\n expected = {'output': ref}\n\n self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)\n\n def test_layer_normalization_gpu(self):\n self.test_layer_normalization_cpu(cpu_only=False)\n\n\ndef get_size_after_stride(X, params):\n start = params[\"start\"]\n end = params[\"end\"]\n stride = params[\"stride\"]\n if params[\"axis\"] == 
'width': axis = 2\n if params[\"axis\"] == 'height': axis = 1\n if params[\"axis\"] == 'channel': axis = 0\n N = X.shape[axis]\n if end < 0:\n end = end + N\n end = min(end, N)\n if start > N - 1:\n L = 0\n else:\n L = np.floor((end - 1 - start) / stride) + 1\n if L < 0:\n L = 0\n return L\n\n\ndef get_numpy_predictions_slice(X, params):\n start = params[\"start\"]\n end = params[\"end\"]\n stride = params[\"stride\"]\n if params[\"axis\"] == 'width':\n return X[:, :, start:end:stride]\n if params[\"axis\"] == 'height':\n return X[:, start:end:stride, :]\n if params[\"axis\"] == 'channel':\n return X[start:end:stride, :, :]\n\n\ndef get_coreml_predictions_slice(X, params):\n coreml_preds = []\n eval = True\n try:\n input_dim = X.shape\n output_dim = (1, 1,\n 1) # some random dimensions here: we are going to remove this information later\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*output_dim))]\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_slice('slice', 'data', 'output',\n start_index=params[\"start\"],\n end_index=params[\"end\"], stride=params[\"stride\"],\n axis=params[\"axis\"])\n # Remove output shape by deleting and adding an output\n del builder.spec.description.output[-1]\n output = builder.spec.description.output.add()\n output.name = 'output'\n output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value(\n 'DOUBLE')\n # save the model\n model_dir = tempfile.mkdtemp()\n model_path = os.path.join(model_dir, 'test_layer.mlmodel')\n coremltools.utils.save_spec(builder.spec, model_path)\n # prepare input and get predictions\n coreml_model = coremltools.models.MLModel(model_path)\n coreml_input = {'data': X}\n if is_macos() and macos_version() >= (10, 13):\n coreml_preds = coreml_model.predict(coreml_input)['output']\n else:\n coreml_preds = None\n if os.path.exists(model_dir):\n shutil.rmtree(model_dir)\n except RuntimeError as e:\n print(e)\n eval = False\n\n return coreml_preds, eval\n\n\ndef get_numpy_predictions_reduce(X, params):\n if params[\"axis\"] == 'CHW': axis = (0, 1, 2)\n if params[\"axis\"] == 'HW': axis = (1, 2)\n if params[\"axis\"] == 'C': axis = 0\n if params[\"axis\"] == 'H': axis = 1\n if params[\"axis\"] == 'W': axis = 2\n\n if params[\"mode\"] == 'sum': return np.sum(X, axis)\n if params[\"mode\"] == 'avg': return np.mean(X, axis)\n if params[\"mode\"] == 'prod': return np.prod(X, axis)\n if params[\"mode\"] == 'logsum': return np.sum(np.log(X + 1e-6), axis)\n if params[\"mode\"] == 'sumsquare': return np.sum(X ** 2, axis)\n if params[\"mode\"] == 'L2': return np.sqrt(np.sum(X ** 2, axis))\n if params[\"mode\"] == 'L1': return np.sum(np.abs(X), axis)\n if params[\"mode\"] == 'max': return np.amax(X, axis)\n if params[\"mode\"] == 'min': return np.amin(X, axis)\n if params[\"mode\"] == 'argmax': return np.argmax(X, axis)\n\n\ndef get_coreml_predictions_reduce(X, params):\n coreml_preds = []\n eval = True\n try:\n input_dim = X.shape\n # some random dimensions here: we are going to remove this information later\n output_dim = (1, 1, 1)\n input_features = [('data', datatypes.Array(*input_dim))]\n output_features = [('output', datatypes.Array(*output_dim))]\n builder = neural_network.NeuralNetworkBuilder(input_features,\n output_features)\n builder.add_reduce('reduce', 'data', 'output', axis=params[\"axis\"],\n mode=params[\"mode\"])\n # Remove output shape by deleting and adding an output\n del 
builder.spec.description.output[-1]\n output = builder.spec.description.output.add()\n output.name = 'output'\n output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value(\n 'DOUBLE')\n # save the model\n model_dir = tempfile.mkdtemp()\n model_path = os.path.join(model_dir, 'test_layer.mlmodel')\n coremltools.utils.save_spec(builder.spec, model_path)\n # prepare input and get predictions\n coreml_model = coremltools.models.MLModel(model_path)\n coreml_input = {'data': X}\n if is_macos() and macos_version() >= (10, 13):\n coreml_preds = coreml_model.predict(coreml_input)['output']\n else:\n coreml_preds = None\n if os.path.exists(model_dir):\n shutil.rmtree(model_dir)\n except RuntimeError as e:\n print(e)\n eval = False\n\n return coreml_preds, eval\n\n\nclass StressTest(CorrectnessTest):\n\n def test_slice_layer(self):\n params_dict = dict(\n input_shape=[[30, 100, 8], [80, 50, 5], [4, 12, 5], [56, 8, 14]],\n axis=['channel', 'height', 'width'],\n start=[0, 1, 2, 5],\n end=[5, 100, 56, -1, -2, -4],\n stride=[1, 2, 3]\n )\n params = list(itertools.product(*params_dict.values()))\n all_candidates = [dict(zip(params_dict.keys(), x)) for x in params]\n valid_params = []\n for pr in all_candidates:\n X = np.random.rand(*pr[\"input_shape\"])\n if get_size_after_stride(X, pr):\n valid_params.append(pr)\n print(\"Total params to be tested: \", len(valid_params),\n \"out of candidates: \", len(all_candidates))\n\n failed_tests_compile = []\n failed_tests_shape = []\n failed_tests_numerical = []\n for i in range(len(valid_params)):\n params = valid_params[i]\n X = np.random.rand(*params[\"input_shape\"])\n np_preds = get_numpy_predictions_slice(X, params)\n coreml_preds, eval = get_coreml_predictions_slice(X, params)\n if eval is False:\n failed_tests_compile.append(params)\n elif coreml_preds is not None:\n if not self._compare_shapes(np_preds, coreml_preds):\n failed_tests_shape.append(params)\n elif not self._compare_predictions(np_preds, coreml_preds):\n failed_tests_numerical.append(params)\n\n self.assertEqual(failed_tests_compile, [])\n self.assertEqual(failed_tests_shape, [])\n self.assertEqual(failed_tests_numerical, [])\n\n def test_reduce_layer(self):\n params_dict = dict(\n input_shape=[[3, 10, 8], [8, 5, 5], [4, 12, 10], [7, 1, 14]],\n mode=['sum', 'avg', 'prod', 'sumsquare', 'L1', 'L2', 'max',\n 'min', 'argmax'],\n axis=['CHW', 'HW', 'C', 'H', 'W'],\n )\n params = list(itertools.product(*params_dict.values()))\n all_candidates = [dict(zip(params_dict.keys(), x)) for x in params]\n valid_params = []\n for pr in all_candidates:\n if pr[\"mode\"] == 'argmax':\n if pr[\"axis\"] == 'CHW' or pr[\"axis\"] == 'HW':\n continue\n valid_params.append(pr)\n print(\"Total params to be tested: \", len(valid_params),\n \"out of candidates: \", len(all_candidates))\n\n failed_tests_compile = []\n failed_tests_shape = []\n failed_tests_numerical = []\n for i in range(len(valid_params)):\n params = valid_params[i]\n X = np.random.rand(*params[\"input_shape\"])\n np_preds = get_numpy_predictions_reduce(X, params)\n coreml_preds, eval = get_coreml_predictions_reduce(X, params)\n if eval is False:\n failed_tests_compile.append(params)\n elif coreml_preds is not None:\n if not self._compare_shapes(np_preds, coreml_preds):\n failed_tests_shape.append(params)\n elif not self._compare_predictions(np_preds, coreml_preds):\n failed_tests_numerical.append(params)\n\n self.assertEqual(failed_tests_compile, [])\n self.assertEqual(failed_tests_shape, [])\n 
self.assertEqual(failed_tests_numerical, [])\n\n\[email protected](not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,\n 'macOS 10.15+ required. Skipping tests.')\nclass CoreML3NetworkStressTest(CorrectnessTest):\n def test_dyn_weight_conv2d_stress(self):\n options = dict(\n padding = ['valid'],\n filters = [1,2,4],\n kernel_size = [1,3,5], # square kernels\n strides = [1,2],\n dilation_rate = [1],\n )\n\n input_size = 16\n input_channels = 3\n input_dim = (1, input_channels, input_size, input_size)\n\n def conv_spatial_size(image_size, kernel_size, stride, dilation,\n padding):\n if padding == 'valid':\n kernel_size_dilated = (kernel_size - 1) * dilation + 1\n return (image_size - kernel_size_dilated) // stride + 1\n elif padding == 'same':\n return int(math.ceil(image_size * 1.0 / stride))\n else:\n return 0\n\n for x in itertools.product(*options.values()):\n kwargs = dict(zip(options.keys(), x))\n if kwargs['strides'] > 1 and kwargs['dilation_rate'] > 1:\n continue\n # weight layout: (output_channels, kernel_channels, height, width)\n weight_dim = (kwargs['filters'], input_channels,\n kwargs['kernel_size'], kwargs['kernel_size'])\n\n input_features = [\n ('input', datatypes.Array(*input_dim)),\n ('weight', datatypes.Array(*weight_dim))]\n output_features = [('output', None)]\n\n builder = neural_network.NeuralNetworkBuilder(\n input_features,\n output_features,\n disable_rank5_shape_mapping=True)\n\n builder.add_convolution(\n name='two_input_conv_layer',\n kernel_channels=input_channels,\n output_channels=kwargs['filters'],\n height=kwargs['kernel_size'],\n width=kwargs['kernel_size'],\n stride_height=kwargs['strides'],\n stride_width=kwargs['strides'],\n border_mode=kwargs['padding'],\n groups=1,\n W=None,\n b=None,\n has_bias=False,\n dilation_rate=kwargs['dilation_rate'],\n input_name=['input', 'weight'],\n output_name='output')\n\n # Assigning everything to ones should cover the execution path\n # and engine failures, but is not a complete check on numerics.\n out_spatial_size = conv_spatial_size(\n input_size,\n kwargs['kernel_size'],\n kwargs['strides'],\n kwargs['dilation_rate'],\n kwargs['padding'])\n\n input_val = np.ones(input_dim)\n weight_val = np.ones(weight_dim)\n output_dim = (1, kwargs['filters'], out_spatial_size, out_spatial_size)\n expected = np.ones(output_dim) * (kwargs['kernel_size'] * kwargs['kernel_size'] * input_channels)\n\n feed_dict = {'input': input_val, 'weight': weight_val}\n expected = {'output': expected}\n\n self._test_model(builder.spec, feed_dict, expected)\n\n\n def test_power_iteration_cpu(self):\n\n convergence_tolerance = 1e-8\n number_of_iterations = 200\n\n input_features = [('matrix', datatypes.Array(*(2, 2))),\n ('starting_vector', datatypes.Array(*(2,)))]\n\n output_features = [('maximum_eigen_value', datatypes.Array(*(1,))),\n ('eigen_vector', None),\n ('iteration_count', datatypes.Array(*(1,)))]\n\n builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)\n builder.add_expand_dims('expand_dims', 'starting_vector', 'x', axes=[-1])\n builder.add_load_constant_nd('iteration_count', 'iteration_count',\n constant_value=np.zeros((1,)),\n shape=(1,))\n\n loop_layer = builder.add_loop('loop', max_iterations=number_of_iterations)\n loop_body_builder = neural_network.NeuralNetworkBuilder(nn_spec=loop_layer.loop.bodyNetwork)\n # output shape: (n,1)\n loop_body_builder.add_batched_mat_mul('bmm.1', input_names=['matrix', 'x'], output_name='y')\n 
loop_body_builder.add_reduce_l2('reduce', input_name='y', output_name='norm', axes=0)\n loop_body_builder.add_divide_broadcastable('divide', ['y', 'norm'], 'y_normalized')\n # find diff: 1- abs(cosine)\n loop_body_builder.add_batched_mat_mul('cosine', ['y_normalized', 'x'], 'cosine_diff', transpose_a=True)\n loop_body_builder.add_squeeze('squeeze_all', 'cosine_diff', 'cosine_diff_squeeze', squeeze_all=True)\n loop_body_builder.add_unary('abs_cosine', 'cosine_diff_squeeze', 'abs_cosine_diff', mode='abs')\n loop_body_builder.add_activation('diff', non_linearity='LINEAR',\n input_name='abs_cosine_diff',\n output_name='diff', params=[-1,1])\n\n # update iteration count\n loop_body_builder.add_activation('iteration_count_add', non_linearity='LINEAR',\n input_name='iteration_count',\n output_name='iteration_count_plus_1', params=[1, 1])\n loop_body_builder.add_copy('iteration_count_copy', 'iteration_count_plus_1', 'iteration_count')\n\n # update 'x'\n loop_body_builder.add_copy('update_x', 'y_normalized', 'x')\n\n # add condition to break from the loop, if convergence criterion is met\n loop_body_builder.add_less_than('cond', ['diff'], 'cond', alpha=convergence_tolerance)\n branch_layer = loop_body_builder.add_branch('branch_layer', 'cond')\n builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)\n builder_ifbranch.add_loop_break('break')\n\n # now we are out of the loop, compute the eigen value\n builder.add_batched_mat_mul('bmm.2', input_names=['matrix', 'x'], output_name='x_right')\n builder.add_batched_mat_mul('bmm.3', input_names=['x', 'x_right'], output_name='maximum_eigen_value',\n transpose_a=True)\n builder.add_squeeze('squeeze', 'x', 'eigen_vector', squeeze_all=True)\n\n # make input sizes flexible\n spec = builder.spec\n\n flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='matrix',\n enumerated_shapes=[(3,3), (4,4)])\n\n flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='starting_vector',\n enumerated_shapes=[(3,), (4,)])\n\n from numpy import linalg as LA\n\n # try on 3x3 matrix\n A = np.array([[2, -6, 8], [-6, 4, 5], [8, 5, 3]], dtype=np.float)\n starting_vector = np.random.rand(3)\n starting_vector = starting_vector / np.sqrt(np.sum(starting_vector ** 2))\n\n e, v = LA.eig(A)\n idx = np.argmax(abs(e))\n input = {'starting_vector': starting_vector, 'matrix': A.astype(np.float)}\n expected = {'maximum_eigen_value': e[idx]}\n self._test_model(spec, input, expected, useCPUOnly=True)\n\n # try on 2x2 matrix\n A = np.array([[4, -5], [-5, 3]], dtype=np.float)\n starting_vector = np.random.rand(2)\n starting_vector = starting_vector / np.sqrt(np.sum(starting_vector ** 2))\n\n e, v = LA.eig(A)\n idx = np.argmax(abs(e))\n\n input = {'starting_vector': starting_vector, 'matrix': A.astype(np.float)}\n expected = {'maximum_eigen_value': e[idx]}\n self._test_model(spec, input, expected, useCPUOnly=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n # suite = unittest.TestSuite()\n # suite.addTest(NewLayersSimpleTest(\"test_softmax_nan_bug_cpu\"))\n # #suite.addTest(SimpleNetworkTest(\"test_power_iteration_cpu\"))\n # unittest.TextTestRunner().run(suite)\n" ]
[ [ "numpy.minimum", "numpy.sqrt", "tensorflow.image.non_max_suppression", "numpy.exp", "numpy.where", "numpy.square", "numpy.matmul", "numpy.sin", "numpy.ceil", "tensorflow.Session", "numpy.zeros", "numpy.log", "tensorflow.gather_nd", "numpy.multiply", "numpy.random.choice", "numpy.power", "tensorflow.placeholder", "numpy.append", "numpy.minimum.reduce", "tensorflow.reverse_sequence", "numpy.floor", "numpy.array", "numpy.tanh", "numpy.sum", "numpy.random.shuffle", "numpy.random.permutation", "numpy.random.uniform", "numpy.take_along_axis", "numpy.split", "numpy.expand_dims", "numpy.arctanh", "numpy.arctan", "numpy.concatenate", "tensorflow.pad", "tensorflow.scatter_div", "tensorflow.scatter_nd_update", "tensorflow.Graph", "numpy.arcsin", "numpy.reshape", "numpy.copy", "numpy.argmax", "numpy.triu", "numpy.cosh", "numpy.min", "numpy.amin", "numpy.arccos", "numpy.tan", "tensorflow.scatter_min", "numpy.random.rand", "numpy.random.binomial", "numpy.arcsinh", "numpy.multiply.reduce", "numpy.maximum", "numpy.sinh", "numpy.ones", "numpy.take", "numpy.around", "tensorflow.scatter_add", "numpy.mean", "numpy.argmin", "numpy.random.randint", "tensorflow.Variable", "numpy.clip", "numpy.eye", "numpy.add.reduce", "numpy.logical_not", "numpy.nonzero", "tensorflow.global_variables_initializer", "numpy.transpose", "numpy.argsort", "numpy.tile", "numpy.percentile", "numpy.cos", "numpy.broadcast_to", "numpy.array_split", "tensorflow.scatter_mul", "tensorflow.scatter_update", "numpy.amax", "numpy.exp2", "numpy.true_divide", "numpy.squeeze", "numpy.lib.stride_tricks.as_strided", "numpy.max", "numpy.tril", "numpy.linalg.eig", "numpy.arange", "numpy.stack", "numpy.testing.assert_almost_equal", "numpy.random.multinomial", "tensorflow.scatter_nd_sub", "numpy.put_along_axis", "numpy.arccosh", "numpy.flip", "numpy.maximum.reduce", "numpy.abs", "numpy.random.seed", "numpy.sign", "numpy.random.normal", "tensorflow.scatter_max", "numpy.prod", "tensorflow.scatter_sub", "tensorflow.scatter_nd_add", "numpy.float" ] ]
maxfrei750/paddle
[ "78d4a585f7cae4eeea3ec47f8d5e41de1dfef1f9", "78d4a585f7cae4eeea3ec47f8d5e41de1dfef1f9" ]
[ "paddle/metrics/utilities.py", "data_preparation/utilities.py" ]
[ "from typing import Literal, Optional\n\nimport torch\nfrom torch import Tensor\nfrom torchvision.ops import box_iou\n\n\ndef mask_iou(\n masks_prediction: Tensor,\n masks_target: Tensor,\n boxes_prediction: Tensor,\n boxes_target: Tensor,\n) -> Tensor:\n \"\"\"Calculates IoU matrix, based on instance masks.\n\n :param masks_prediction: NxHxW Tensor which holds predicted masks.\n :param masks_target: NxHxW Tensor which holds target masks.\n :param boxes_prediction: Nx4 Tensor which holds predicted boxes.\n :param boxes_target: Nx4 Tensor which holds target boxes.\n :return: NxM tensor containing the pairwise IoU values.\n \"\"\"\n\n # TODO: Support detections without bounding boxes by calculating bounding boxes based on masks.\n\n box_iou_matrix = box_iou(boxes_prediction, boxes_target) # predictions x targets\n mask_iou_matrix = torch.zeros_like(box_iou_matrix)\n\n masks_prediction = torch.round(masks_prediction).bool()\n masks_target = masks_target.bool()\n\n for p, (mask_prediction, box_prediction) in enumerate(zip(masks_prediction, boxes_prediction)):\n\n for t, (mask_target, box_target) in enumerate(zip(masks_target, boxes_target)):\n # Only calculate mask iou, if boxes overlap.\n if box_iou_matrix[p, t]:\n x_corners = torch.cat([box_target[0::2], box_prediction[0::2]])\n y_corners = torch.cat([box_target[1::2], box_prediction[1::2]])\n\n x_min = torch.floor(x_corners.min()).int()\n y_min = torch.floor(y_corners.min()).int()\n x_max = torch.ceil(x_corners.max()).int()\n y_max = torch.ceil(y_corners.max()).int()\n\n mask_overlap_target = mask_target[y_min:y_max, x_min:x_max]\n mask_overlap_prediction = mask_prediction[y_min:y_max, x_min:x_max]\n\n area_intersection = torch.logical_and(\n mask_overlap_target, mask_overlap_prediction\n ).sum()\n area_union = torch.logical_or(mask_overlap_target, mask_overlap_prediction).sum()\n\n iou = area_intersection / area_union\n\n mask_iou_matrix[p, t] = iou\n\n return mask_iou_matrix\n\n\ndef calculate_iou_matrix(\n boxes_predicted: Tensor,\n boxes_target: Tensor,\n iou_type: Literal[\"box\", \"mask\"],\n masks_predicted: Optional[Tensor] = None,\n masks_target: Optional[Tensor] = None,\n):\n \"\"\"Calculate the Intersections over Unions (IOUs) of N predictions with M targets.\n Supports both box and mask IOU.\n\n\n :param boxes_predicted: Bounding boxes of predictions (Tensor[Nx4]).\n :param boxes_target: Bounding boxes of targets (Tensor[Mx4]).\n :param iou_type: Either \"mask\" or \"box\". Controls which kind of IOU is calculated.\n :param masks_predicted: Masks of predictions (Tensor[NxHxW]). Only needed if `iou_type` is\n \"mask\".\n :param masks_target: Masks of targets (Tensor[MxHxW). Only needed if `iou_type` is \"mask\".\n :return: Intersections over Unions (Tensor[NxM])\n \"\"\"\n if iou_type == \"box\":\n ious = box_iou(boxes_predicted, boxes_target)\n elif iou_type == \"mask\":\n\n if masks_target is None or boxes_target is None:\n raise ValueError(\n \"`masks_target` and `boxes_target` are required, if `iou_type` is 'mask'.\"\n )\n\n ious = mask_iou(\n masks_predicted,\n masks_target,\n boxes_predicted,\n boxes_target,\n )\n else:\n raise ValueError(f\"Unknown iou_type: {iou_type}. 
Expected 'box' or 'mask'.\")\n\n return ious\n", "import random\nimport shutil\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom data_preparation.sopat_catalyst import AnyPath\n\n\ndef split(data_root: AnyPath, validation_percentage: float, seed: int = 42):\n \"\"\"Split data into a training and a validation set.\n\n :param data_root: Path, where images and mask folders are stored.\n :param validation_percentage: Percentage of images to put into the validation set.\n :param seed: random seed\n :return:\n \"\"\"\n\n if not validation_percentage <= 1:\n raise ValueError(\"`validation_percentage` must be in the range [0, 1].\")\n\n random.seed(seed)\n data_root = Path(data_root)\n\n validation_folder_path = data_root / \"validation\"\n validation_folder_path.mkdir(parents=True, exist_ok=True)\n\n training_folder_path = data_root / \"training\"\n training_folder_path.mkdir(parents=True, exist_ok=True)\n\n image_paths = list(data_root.glob(\"image_*.png\"))\n num_images = len(image_paths)\n\n num_images_validation = round(validation_percentage * num_images)\n image_paths_validation = random.sample(image_paths, num_images_validation)\n\n _move_files(data_root, image_paths_validation, validation_folder_path)\n\n image_paths_training = data_root.glob(\"image_*.png\")\n\n _move_files(data_root, image_paths_training, training_folder_path)\n\n\ndef _move_files(data_root, image_paths, subset_folder_path):\n for image_path in image_paths:\n image_id = image_path.stem[6:]\n mask_paths = data_root.glob(f\"**/mask_{image_id}*.*\")\n\n for mask_path in mask_paths:\n class_name = mask_path.parent.name\n new_mask_path = subset_folder_path / class_name / mask_path.name\n new_mask_path.parent.mkdir(parents=True, exist_ok=True)\n\n shutil.move(mask_path, new_mask_path)\n\n shutil.move(image_path, subset_folder_path / image_path.name)\n\n\ndef convert_imagej_csv_to_masks_batch(\n data_root: AnyPath,\n image_folder: AnyPath,\n csv_file_glob: AnyPath,\n output_root: AnyPath,\n mask_folder_name: str,\n) -> None:\n \"\"\"Convert circular/elliptical ImageJ annotations to binary masks. Also converts and renames\n associated input images to png.\n\n :param data_root: Path, where ImageJ results csv-file and associated images are stored.\n :param image_folder: Path of the folder where the input images are stored, relative to data_root\n :param csv_file_glob: glob expression to identify ImageJ csv files, relative to the data_root.\n :param output_root: Path of the folder, were resulting binary masks and images are stored,\n relative to data_root.\n :param mask_folder_name: name of the folder, where masks are stored. Will be created in\n output_root.\n \"\"\"\n\n data_root = Path(data_root)\n\n for csv_file_path in tqdm(data_root.glob(csv_file_glob)):\n convert_imagej_csv_to_masks(\n data_root, image_folder, csv_file_path, output_root, mask_folder_name\n )\n\n\ndef convert_imagej_csv_to_masks(\n data_root: AnyPath,\n image_folder: AnyPath,\n csv_path: AnyPath,\n output_root: AnyPath,\n mask_folder_name: str,\n) -> None:\n \"\"\"Convert circular/elliptical ImageJ annotations to binary masks. 
Also converts and renames\n associated input images to png.\n\n :param data_root: Path, where ImageJ results csv-file and associated images are stored.\n :param image_folder: Path of the folder where the input images are stored, relative to data_root\n :param csv_path: Path of the ImageJ results csv-file, relative to data_root.\n :param output_root: Path, where the output mask folder and images reside.\n :param mask_folder_name: name of the folder, where masks are stored. Will be created in\n output_root.\n \"\"\"\n\n data_root = Path(data_root)\n output_root = Path(output_root)\n\n mask_folder_path = output_root / mask_folder_name\n mask_folder_path.mkdir(exist_ok=True, parents=True)\n\n csv_data = pd.read_csv(csv_path)\n\n image_file_names = csv_data[\"Label\"].unique()\n\n for image_file_name in image_file_names:\n annotation_data = csv_data[csv_data[\"Label\"] == image_file_name]\n\n image_path = data_root / image_folder / image_file_name\n image_name = Path(image_file_name).stem\n image = Image.open(image_path)\n image.save(output_root / f\"image_{image_name}.png\")\n\n for annotation_index, annotation in annotation_data.iterrows():\n mask = np.zeros_like(image)\n\n center = (int(annotation[\"X\"]), int(annotation[\"Y\"]))\n axes = (int(annotation[\"Major\"] / 2), int(annotation[\"Minor\"] / 2))\n angle = 360 - annotation[\"Angle\"]\n\n mask = cv2.ellipse(mask, center, axes, angle, 0, 360, color=1, thickness=-1)\n mask = Image.fromarray(mask.astype(bool))\n mask.save(mask_folder_path / f\"mask_{image_name}_{annotation_index}.png\")\n" ]
[ [ "torch.cat", "torch.round", "torch.zeros_like", "torch.logical_and", "torch.logical_or" ], [ "pandas.read_csv", "numpy.zeros_like" ] ]
Yao-14/stAnalysis
[ "d08483ce581f5b03cfcad8be500aaa64b0293f74" ]
[ "stRT/tdr/models/voxel/voxel_model.py" ]
[ "import numpy as np\nimport pyvista as pv\nfrom pyvista import PolyData, UnstructuredGrid\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nfrom typing import Optional, Union\n\nfrom ..utilities import add_model_labels, merge_models\n\n#########################################################\n# Construct cell-level voxel model based on point cloud #\n#########################################################\n\n\ndef voxelize_pc(\n pc: PolyData,\n voxel_size: Optional[np.ndarray] = None,\n) -> UnstructuredGrid:\n \"\"\"\n Voxelize the point cloud.\n\n Args:\n pc: A point cloud model.\n voxel_size: The size of the voxelized points. The shape of voxel_size is (pc.n_points, 3).\n\n Returns:\n voxel: A voxel model.\n \"\"\"\n # Check open3d package\n try:\n import PVGeo\n except ImportError:\n raise ImportError(\n \"You need to install the package `PVGeo`. \\nInstall PVGeo via `pip install PVGeo`\"\n )\n\n voxelizer = PVGeo.filters.VoxelizePoints()\n\n if not (voxel_size is None):\n voxelizer.set_deltas(voxel_size[:, 0], voxel_size[:, 1], voxel_size[:, 2])\n voxelizer.set_estimate_grid(False)\n\n voxel_pc = voxelizer.apply(pc)\n\n # add labels\n pc_keys = pc.point_data.keys()\n if not (pc_keys is None):\n for key in pc_keys:\n voxel_pc.cell_data[key] = pc.point_data[key]\n\n return voxel_pc\n\n\n##################################################################\n# Construct cell-level or tissue-level voxel model based on mesh #\n##################################################################\n\n\ndef voxelize_mesh(\n mesh: Union[PolyData, UnstructuredGrid],\n voxel_pc: Union[PolyData, UnstructuredGrid] = None,\n key_added: str = \"groups\",\n label: str = \"voxel\",\n color: Optional[str] = \"gainsboro\",\n alpha: Union[float, int] = 1.0,\n smooth: Optional[int] = 200,\n) -> UnstructuredGrid:\n \"\"\"\n Construct a volumetric mesh based on surface mesh.\n\n Args:\n mesh: A surface mesh model.\n voxel_pc: A voxel model which contains the `voxel_pc.cell_data['obs_index']` and `voxel_pc.cell_data[key_added]`.\n key_added: The key under which to add the labels.\n label: The label of reconstructed voxel model.\n color: Color to use for plotting mesh. The default color is `'gainsboro'`.\n alpha: The opacity of the color to use for plotting model. The default alpha is `0.8`.\n smooth: The smoothness of the voxel model.\n\n Returns:\n voxel_model: A reconstructed voxel model, which contains the following properties:\n `voxel_model.cell_data[key_added]`, the `label` array;\n `voxel_model.cell_data[f'{key_added}_rgba']`, the rgba colors of the `label` array.\n `voxel_model.cell_data['obs_index']`, the cell labels if not (voxel_pc is None).\n \"\"\"\n\n density = mesh.length / smooth\n voxel_model = pv.voxelize(mesh, density=density, check_surface=False)\n\n # Add labels and the colormap of the volumetric mesh\n labels = np.array([label] * voxel_model.n_cells).astype(str)\n add_model_labels(\n model=voxel_model,\n labels=labels,\n key_added=key_added,\n where=\"cell_data\",\n colormap=color,\n alphamap=alpha,\n inplace=True,\n )\n if not (voxel_pc is None):\n voxel_model.cell_data[\"obs_index\"] = np.asarray(\n [\"no_cell\"] * voxel_model.n_cells\n ).astype(str)\n voxel_model = merge_models(models=[voxel_model, voxel_pc])\n\n return voxel_model\n" ]
[ [ "numpy.asarray", "numpy.array" ] ]
xh-diagrams/xhorizon
[ "20b3f2f0f621ca2a31c9f6a1d5fcd06692a700ce" ]
[ "src/xhorizon/diagram_tools/diagram_classes.py" ]
[ "\n\"\"\"\nThis module defines the classes\n\tdiagram\n\tregion\n\tblock\nwhich are used to construct and plot diagrams.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom xhorizon.diagram_tools import coord_transf as coord\nfrom xhorizon.diagram_tools.block_masks import rstar_minmax, uv_range\nfrom xhorizon.diagram_tools import block_fill\n\n\n\nclass diagram:\n\n\t\"\"\"\n\tA diagram is fundamentally a collection of curves.\n\tA diagram object consists of:\n\t\t(1) a list of curves in UV coordinates, and \n\t\t(2) a list of sss regions, each of which bring their own curves.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\tself.curves = []\n\t\tself.regions = []\n\n\tdef add_curves_UV(self, crvlist):\n\t\tpass\n\n\tdef dplot(self):\n\t\t### \"diagram plot\"\n\t\tfor reg in self.regions:\n\t\t\treg.rplot()\n\n\n\n\nclass region:\n\n\t\"\"\"\n\tA region object corresponds to a single piece of an sss spacetime with metric function f(r).\n\tThis will often, but not always, be a single Eddington-Finklestein region of the spacetime.\n\tA region object consists of:\n\t\t(1) a metfunc object which determines the metric function f(r) and its associated transformations\n\t\t(2) a list of curve objects explicitly belonging to the region\n\t\t(3) a list of block objects belonging to the region\n\t\t(4) methods providing transformations from region to diagram coordinates\n\t\"\"\"\n\n\tdef __init__(self, metfunc, rparams={}):\n\t\tself.metfunc = metfunc\n\t\tself.rparams = dict(c=0., s0=10.)\n\t\tself.rparams.update(rparams)\n\t\tself.curves = []\n\t\tself.blocks = []\n\n\t\"\"\"\n\tUV to uvdl transformations.\n\t\"\"\"\n\n\tdef UV_of_uvdl(self, uvdl):\n\t\tudl, vdl = uvdl\n\t\tU = self.U_of_udl(udl)\n\t\tV = self.V_of_vdl(vdl)\n\t\tUV = np.array([U,V])\n\t\treturn UV\n\n\tdef uvdl_of_UV(self, UV):\n\t\tU, V = UV\n\t\tudl = self.udl_of_U(U)\n\t\tvdl = self.vdl_of_V(V)\n\t\tuvdl = np.array([udl,vdl])\n\t\treturn uvdl\n\n\t\"\"\"\n\tNull direction transformations.\n\t\"\"\"\n\n\t## forward\n\n\tdef U_of_udl(self,udl):\n\t\tU = 1. * udl\n\t\treturn U\n\n\tdef V_of_vdl(self,vdl):\n\t\tV = 1. * vdl\n\t\treturn V\n\n\t## backward\n\n\tdef udl_of_U(self,U):\n\t\tudl = 1. * U\n\t\treturn udl\n\n\tdef vdl_of_V(self,V):\n\t\tvdl = 1. 
* V\n\t\treturn vdl\n\n\t\"\"\"\n\tUtilities.\n\t\"\"\"\n\n\tdef add_block(self, j, bparams):\n\t\t\"\"\"\n\t\tAdd a new block to the region, given index j and parameters bparams.\n\t\t\"\"\"\n\t\tb = block(self, j, bparams)\n\t\tif b.consistency_check()==True:\n\t\t\tself.blocks += [b]\n\t\treturn b\n\n\tdef add_curves_uvdl(self, crvlist):\n\t\tpass\n\t\n\t\"\"\"\n\tPlotting.\n\t\"\"\"\n\n\tdef rplot(self):\n\t\t### \"region plot\"\n\t\tfor b in self.blocks:\n\t\t\tb.bplot()\n\n\n\n\nclass block:\n\n\t\"\"\"\n\tA block object corresponds to a single sss block.\n\tIt is owned by a region, its \"master\", which determines its metric function and region params c,s0.\n\n\tThe block object does most of the heavy lifting for creating a diagram.\n\t\"\"\"\n\n\tdef __init__(self, master, j, bparams={}):\n\t\t## primary attributes\n\t\tself.master = master\n\t\tself.j = int(j)\n\t\tself.bparams = dict(cdlu=0., cdlv=0., epsu=1., epsv=1.)\n\t\tself.bparams.update(bparams)\n\t\tself.curves = []\n\t\tself.masks = []\n\t\tself.uvbounds = dict(umin=-np.inf, umax=np.inf, vmin=-np.inf, vmax=np.inf)\n\t\t## derived attributes\n\t\tself.rj = None\n\t\tself.kj = None\n\t\tself.kpm = None\n\t\tself.sgnf = None\n\t\tself.F = None\n\t\tself.Finv = None\n\t\tself.s0 = None\n\t\tself.c = None\n\t\t## coordinate transformations\n\t\tself.uv_of_tr = None\n\t\tself.tr_of_uv = None\n\t\tself.r_of_uv = None\n\t\tself.uvdl_of_uv = None\n\t\tself.uv_of_uvdl = None\n\t\t## supplementary functions\n\t\tself.h = None\n\t\tself.hinv = None\n\t\t## set up block\n\t\tself.masks += [ rstar_minmax, uv_range ]\n\t\tself.update()\n\n\t\"\"\"\n\tMethods for use by the user.\n\t\"\"\"\n\n\tdef update(self):\n\t\t\"\"\"\n\t\tUse current block primary attributes to set derived attributes and coordinate transformations.\n\t\tCheck for consistency.\n\t\t\"\"\"\n\t\tself.update_derived_attributes()\n\t\tself.consistency_check()\n\t\tself.update_coord_transf()\n\n\tdef add_curves_uv(self, crvlist):\n\t\t\"\"\"\n\t\tAdd a curve to the block, using uv coords to fill all other coordinate fields.\n\t\t\"\"\"\n\t\tself.curves += self.apply_masks(self.update_curves_from_uv(crvlist))\n\n\tdef refresh_curves_from_uv(self):\n\t\t\"\"\"\n\t\tRecalculate coordinates of all curves based on the current uv coords.\n\t\t\"\"\"\n\t\tself.curves = self.apply_masks(self.update_curves_from_uv(self.curves))\n\n\tdef add_curves_tr(self, crvlist):\n\t\t\"\"\"\n\t\tAdd a curve to the block, using tr coords to fill all other coordinate fields.\n\t\tOnly curves lying entirely within the block can be added.\n\t\tIf any points have an invalid radius, whole curve is discarded.\n\t\t\"\"\"\n\t\tself.curves += self.apply_masks(self.update_curves_from_tr(crvlist))\n\n\tdef update_curves_from_tr(self, crvlist):\n\t\t\"\"\"\n\t\tUpdate curves to the block, using tr coords to fill all other coordinate fields.\n\t\tOnly curves lying entirely within the block can be added.\n\t\tIf any points have an invalid radius, whole curve is discarded.\n\t\t\"\"\"\n\t\tcrvlist2 = []\n\t\tfor crv in crvlist:\n\t\t\tt, r = crv.tr\n\t\t\tmask = np.logical_and( self.rj[0]<r , r<self.rj[1] )\n\t\t\tif not np.any(mask==False):\n\t\t\t\tcrv.uv = self.uv_of_tr(crv.tr)\n\t\t\t\tcrvlist2 += [crv]\n\t\tcrvlist2 = self.update_curves_from_uv(crvlist2)\n\t\treturn crvlist2\n\n\tdef fill(self, sty={}):\n\t\t\"\"\"\n\t\tFill in the block with a background patch.\n\t\t\"\"\"\n\t\tblock_fill.fill_block(self, sty=sty)\n\n\tdef fill_between_r(self, rvals=np.array([0.,0.]), sty={}, npoints=1000, 
inf=100):\n\t\t\"\"\"\n\t\tFill block between r values if valid.\n\t\t\"\"\"\n\t\tblock_fill.fill_between_r(self, rvals=rvals, sty=sty, npoints=npoints, inf=inf)\n\n\n\tdef bplot(self):\n\t\t\"\"\"\n\t\tPlot all curves belonging to the block.\n\t\t\"\"\"\n\t\tself.refresh_curves_from_uv()\n\t\tfor crv in self.curves:\n\t\t\tU, V = crv.UV[0], crv.UV[1]\n\t\t\tplt.plot(V-U, V+U, **crv.sty)\n\n\t\"\"\"\n\tMethods called by other methods.\n\t\"\"\"\n\n\tdef update_derived_attributes(self):\n\t\t\"\"\"\n\t\tUse the current block primary attributes to set the derived attributes.\n\t\t\"\"\"\n\t\t## from metfunc\n\t\tself.rj = self.master.metfunc.rj[self.j:self.j+2]\n\t\tself.kj = self.master.metfunc.kj[self.j:self.j+2]\n\t\tself.kpm = np.sort( self.kj )\n\t\tself.sgnf = self.master.metfunc.sgnf(self.j)\n\t\tself.F = lambda r: self.master.metfunc.F(r)\n\t\tself.Finv = lambda rstar: self.master.metfunc.Finv(self.j, rstar)\n\t\t## from region\n\t\tself.s0 = float(self.master.rparams['s0'])\n\t\tself.c = float(self.master.rparams['c'])\n\n\tdef update_coord_transf(self):\n\t\t\"\"\"\n\t\tUse the current block attributes to set the coordinate transformations.\n\t\t\"\"\"\n\t\t## extract block parameters from dict\n\t\tcdlu, cdlv = float(self.bparams['cdlu']), float(self.bparams['cdlv'])\n\t\tepsu, epsv = float(self.bparams['epsu']), float(self.bparams['epsv'])\n\t\t## define coord transformations with current parameters\n\t\tself.uv_of_tr = lambda tr: coord.uv_of_tr(tr, self.F, self.c)\n\t\tself.tr_of_uv = lambda uv: coord.tr_of_uv(uv, self.Finv, self.c)\n\t\tself.r_of_uv = lambda uv: coord.r_of_uv(uv, self.Finv, self.c)\n\t\tself.uvdl_of_uv = lambda uv: coord.uvdl_of_uv(uv , cdlu, cdlv, epsu, epsv, self.kpm, self.s0)\n\t\tself.uv_of_uvdl = lambda uvdl: coord.uv_of_uvdl(uvdl, cdlu, cdlv, epsu, epsv, self.kpm, self.s0)\n\t\t## define supplementary functions for reference\n\t\tself.h = lambda s: coord.hks(s, self.kpm, self.s0)\n\t\tself.hinv = lambda s: coord.hksinv(s, self.kpm, self.s0)\n\n\tdef consistency_check(self):\n\t\t\"\"\"\n\t\tCheck if current block attributes are self-consistent.\n\t\tTrue if consistent. 
False if not.\n\t\t\"\"\"\n\t\t## initialize\n\t\tconsistent = True\n\t\tmessages = [\n\t\t\t\"WARNING: Block attributes are not self-consistent.\",\n\t\t\t\"Block Info: %s\"%(self.master.metfunc.info),\n\t\t\t\"Block Info: j = %r\"%(self.j),\n\t\t\t\"Issues:\",\n\t\t\t]\n\t\t## # valid interval label?\n\t\tif not self.j in range(len(self.master.metfunc.rj)-1):\n\t\t\tconsistent = False\n\t\t\tmessages += [\"Check block index.\", \"j=%r, jmax=%r\"%( self.j, (len(self.master.metfunc.rj)-2) )]\n\t\t## # valid block orientation?\n\t\tepsu, epsv = float(self.bparams['epsu']), float(self.bparams['epsv'])\n\t\tif not epsu*epsv == self.sgnf:\n\t\t\tconsistent = False\n\t\t\tmessages += [\"Check block orientation.\", \"epsu*epsv.sgnf = %r\"%( epsu*epsv/self.sgnf )]\n\t\t## # warn if not consistent\n\t\tif consistent == False:\n\t\t\tmessages += [\"END WARNING\"]\n\t\t\tprint('\\n' + '\\n'.join(messages) + '\\n')\n\t\t## return\n\t\treturn consistent\n\n\tdef update_curves_from_uv(self, crvlist):\n\t\t\"\"\"\n\t\tUpdate curves in the block, using uv coords to fill all other coordinate fields.\n\t\t\"\"\"\n\t\tfor crv in crvlist:\n\t\t\tcrv.r = self.r_of_uv(crv.uv)\n\t\t\tcrv.tr = self.tr_of_uv(crv.uv)\n\t\t\tcrv.uvdl = self.uvdl_of_uv(crv.uv)\n\t\t\tcrv.UV = self.master.UV_of_uvdl(crv.uvdl)\n\t\treturn crvlist\n\n\tdef apply_masks(self, crvlist):\n\t\t\"\"\"\n\t\tApply all masks to self.curves.\n\t\tMasks are functions of the form \n\t\t\tmask(block, crvlist): return new_crvlist\n\t\t\"\"\"\n\t\tfor mask in self.masks:\n\t\t\tcrvlist = mask(self, crvlist)\n\t\treturn crvlist\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.logical_and", "numpy.sort", "matplotlib.pyplot.plot", "numpy.any", "numpy.array" ] ]
nik849/ct-tools
[ "1218f62de1b8974651506ebf8fd9197c1b038a0a" ]
[ "ct-tools/recon.py" ]
[ "import axitom\nimport numpy as np\nimport glob\nfrom os.path import *\nimport matplotlib.pyplot as plt\n\n\nclass data(object):\n\n\n def __init__(self, dataset=None, xtekct=None, debug=None, **kwargs):\n self.debug = debug\n if dataset is not None:\n self.dataset = sorted([os.path.join(data, f) for f in os.listdir(data) if f.endswith('.tif')])\n else:\n self.dataset = sorted(glob.glob(\".tif\"))\n if xtekct is not None:\n self.config = axitom.config_from_xtekct(xtekct)\n else:\n self.config = axitom.config_from_xtekct(glob.glob(\"*.xtekct\")[0])\n if self.debug:\n print(self.dataset)\n print(self.config)\n\n\n def load_dataset(self, corrected=True):\n self.stack = [np.array(axitom.read_image(image, flat_corrected=corrected)) for image in self.dataset]\n if self.debug:\n print(self.stack)\n return self.stack\n\n\n def COR(self, stack, slice_number=None, background=0.9):\n if slice_number is not None:\n self.slice = slice_number\n else:\n if len(self.dataset) >= 2:\n self.slice = int(len(self.dataset)/2)\n _, center_offset = axitom.object_center_of_rotation(stack[self.slice], self.config, background_internsity=background)\n self.config.center_of_rot_y = center_offset\n self.config.update()\n if self.debug:\n print(center_offset)\n\n\n def recon(self, stack):\n tomo = [axitom.fdk(slice, self.config) for slice in self.stack]\n plt.title(\"1st Reconstructed Radial slice\")\n plt.imshow(tomo[0].transpose(), cmap=plt.cm.cividis)\n" ]
[ [ "matplotlib.pyplot.title" ] ]
pwmdebuck/dymos-1
[ "0fb3e91e8c32b34fca41e8c1ec9bec66f31af341", "0fb3e91e8c32b34fca41e8c1ec9bec66f31af341" ]
[ "dymos/examples/racecar/problemSolver.py", "dymos/examples/brachistochrone/test/test_brachistochrone_undecorated_ode.py" ]
[ "import numpy as np\nimport openmdao.api as om\nimport dymos as dm\nimport matplotlib.pyplot as plt\nfrom combinedODE import CombinedODE\nimport matplotlib as mpl\n\n#track curvature imports\nfrom scipy import interpolate\nfrom scipy import signal\nfrom Track import Track\nimport tracks\nfrom spline import getSpline,getTrackPoints,getGateNormals,reverseTransformGates,setGateDisplacements,transformGates\nfrom linewidthhelper import *\n\nprint('Config: RWD single thrust')\n\ntrack = tracks.ovaltrack #change track here and in curvature.py. Tracks are defined in tracks.py\nplot = True #plot track and telemetry\n\npoints = getTrackPoints(track) #generate nodes along the centerline for curvature calculation (different than collocation nodes)\nfinespline,gates,gatesd,curv,slope = getSpline(points,s=0.0) #fit the centerline spline. by default 10000 points\ns_final = track.getTotalLength()\n\n# Define the OpenMDAO problem\np = om.Problem(model=om.Group())\n\n# Define a Trajectory object\ntraj = dm.Trajectory()\np.model.add_subsystem('traj', subsys=traj)\n\n# Define a Dymos Phase object with GaussLobatto Transcription\nphase = dm.Phase(ode_class=CombinedODE,\n\t\t transcription=dm.GaussLobatto(num_segments=50, order=3,compressed=True))\n\ntraj.add_phase(name='phase0', phase=phase)\n\n# Set the time options, in this problem we perform a change of variables. So 'time' is actually 's' (distance along the track centerline)\n# This is done to fix the collocation nodes in space, which saves us the calculation of the rate of change of curvature.\n# The state equations are written with respect to time, the variable change occurs in timeODE.py\nphase.set_time_options(fix_initial=True,fix_duration=True,duration_val=s_final,targets=['curv.s'],units='m',duration_ref=s_final,duration_ref0=10)\n\n#Define states\nphase.add_state('t', fix_initial=True, fix_final=False, units='s', lower = 0,rate_source='dt_ds',ref=100) #time\nphase.add_state('n', fix_initial=False, fix_final=False, units='m', upper = 4.0, lower = -4.0, rate_source='dn_ds',targets=['n'],ref=4.0) #normal distance to centerline. 
The bounds on n define the width of the track\nphase.add_state('V', fix_initial=False, fix_final=False, units='m/s', ref = 40, ref0=5,rate_source='dV_ds', targets=['V']) #velocity\nphase.add_state('alpha', fix_initial=False, fix_final=False, units='rad', rate_source='dalpha_ds',targets=['alpha'],ref=0.15) #vehicle heading angle with respect to centerline\nphase.add_state('lambda', fix_initial=False, fix_final=False, units='rad',rate_source='dlambda_ds',targets=['lambda'],ref=0.01) #vehicle slip angle, or angle between the axis of the vehicle and velocity vector (all cars drift a little)\nphase.add_state('omega', fix_initial=False, fix_final=False, units='rad/s',rate_source='domega_ds',targets=['omega'],ref=0.3) #yaw rate\nphase.add_state('ax',fix_initial=False,fix_final=False,units='m/s**2',rate_source='dax_ds',targets=['ax'],ref=8) #longitudinal acceleration\nphase.add_state('ay',fix_initial=False,fix_final=False,units='m/s**2', rate_source='day_ds',targets=['ay'],ref=8) #lateral acceleration\n\n#Define Controls\nphase.add_control(name='delta', units='rad', lower=None, upper=None,fix_initial=False,fix_final=False, targets=['delta'],ref=0.04) #steering angle\nphase.add_control(name='thrust', units=None,fix_initial=False,fix_final=False, targets=['thrust']) #the thrust controls the longitudinal force of the rear tires and is positive while accelerating, negative while braking\n\n#Performance Constraints\npmax = 960000 #W\nphase.add_path_constraint('power',shape=(1,),units='W',upper=pmax,ref=100000) #engine power limit\n\n#The following four constraints are the tire friction limits, with 'rr' designating the rear right wheel etc. This limit is computed in tireConstraintODE.py\nphase.add_path_constraint('c_rr',shape=(1,),units=None,upper=1)\nphase.add_path_constraint('c_rl',shape=(1,),units=None,upper=1)\nphase.add_path_constraint('c_fr',shape=(1,),units=None,upper=1)\nphase.add_path_constraint('c_fl',shape=(1,),units=None,upper=1)\n\n#Some of the vehicle design parameters are available to set here. 
Other parameters can be found in their respective ODE files.\nphase.add_design_parameter('M',val=800.0,units='kg',opt=False,targets=['car.M','tire.M','tireconstraint.M','normal.M'],dynamic=False) #vehicle mass\nphase.add_design_parameter('beta',val=0.62,units=None,opt=False,targets=['tire.beta'],dynamic=False) #brake bias\nphase.add_design_parameter('CoP',val=1.6,units='m',opt=False,targets=['normal.CoP'],dynamic=False) #center of pressure location\nphase.add_design_parameter('h',val=0.3,units='m',opt=False,targets=['normal.h'],dynamic=False) #center of gravity height\nphase.add_design_parameter('chi',val=0.5,units=None,opt=False,targets=['normal.chi'],dynamic=False) #roll stiffness\nphase.add_design_parameter('ClA',val=4.0,units='m**2',opt=False,targets=['normal.ClA'],dynamic=False) #downforce coefficient*area\nphase.add_design_parameter('CdA',val=2.0,units='m**2',opt=False,targets=['car.CdA'],dynamic=False) #drag coefficient*area\n\n\n#Minimize final time.\nphase.add_objective('t', loc='final') #note that we use the 'state' time instead of Dymos 'time'\n\n#Add output timeseries\nphase.add_timeseries_output('lambdadot',units='rad/s',shape=(1,))\nphase.add_timeseries_output('Vdot',units='m/s**2',shape=(1,))\nphase.add_timeseries_output('alphadot',units='rad/s',shape=(1,))\nphase.add_timeseries_output('omegadot',units='rad/s**2',shape=(1,))\nphase.add_timeseries_output('power',units='W',shape=(1,))\nphase.add_timeseries_output('sdot',units='m/s',shape=(1,))\nphase.add_timeseries_output('c_rr',units=None,shape=(1,))\nphase.add_timeseries_output('c_fl',units=None,shape=(1,))\nphase.add_timeseries_output('c_fr',units=None,shape=(1,))\nphase.add_timeseries_output('c_rl',units=None,shape=(1,))\nphase.add_timeseries_output('N_rr',units='N',shape=(1,))\nphase.add_timeseries_output('N_fr',units='N',shape=(1,))\nphase.add_timeseries_output('N_fl',units='N',shape=(1,))\nphase.add_timeseries_output('N_rl',units='N',shape=(1,))\nphase.add_timeseries_output('curv.kappa',units='1/m',shape=(1,))\n\n#Link the states at the start and end of the phase in order to ensure a continous lap\ntraj.link_phases(phases=['phase0', 'phase0'], vars=['V','n','alpha','omega','lambda','ax','ay'], locs=('++', '--'))\n\n# Set the driver. 
IPOPT or SNOPT are recommended but SLSQP might work.\np.driver = om.pyOptSparseDriver(optimizer='IPOPT')\n\np.driver.opt_settings['mu_init'] = 1e-3\np.driver.opt_settings['max_iter'] = 500\np.driver.opt_settings['acceptable_tol'] = 1e-3\np.driver.opt_settings['constr_viol_tol'] = 1e-3\np.driver.opt_settings['compl_inf_tol'] = 1e-3\np.driver.opt_settings['acceptable_iter'] = 0\np.driver.opt_settings['tol'] = 1e-3\np.driver.opt_settings['hessian_approximation'] = 'exact'\np.driver.opt_settings['nlp_scaling_method'] = 'none'\np.driver.opt_settings['print_level'] = 5\n\n\n# Allow OpenMDAO to automatically determine our sparsity pattern.\n# Doing so can significant speed up the execution of Dymos.\np.driver.declare_coloring()\n\n# Setup the problem\np.setup(check=True) #force_alloc_complex=True\n# Now that the OpenMDAO problem is setup, we can set the values of the states.\n\n#States\np.set_val('traj.phase0.states:V',phase.interpolate(ys=[20,20], nodes='state_input'),units='m/s') #non-zero velocity in order to protect against 1/0 errors.\np.set_val('traj.phase0.states:lambda',phase.interpolate(ys=[0.0,0.0], nodes='state_input'),units='rad') #all other states start at 0\np.set_val('traj.phase0.states:omega',phase.interpolate(ys=[0.0,0.0], nodes='state_input'),units='rad/s')\np.set_val('traj.phase0.states:alpha',phase.interpolate(ys=[0.0,0.0], nodes='state_input'),units='rad')\np.set_val('traj.phase0.states:ax',phase.interpolate(ys=[0.0,0.0], nodes='state_input'),units='m/s**2')\np.set_val('traj.phase0.states:ay',phase.interpolate(ys=[0.0,0.0], nodes='state_input'),units='m/s**2')\np.set_val('traj.phase0.states:n',phase.interpolate(ys=[0.0,0.0], nodes='state_input'),units='m')\np.set_val('traj.phase0.states:t',phase.interpolate(ys=[0.0,100.0], nodes='state_input'),units='s') #initial guess for what the final time should be\n\n#Controls\np.set_val('traj.phase0.controls:delta',phase.interpolate(ys=[0.0,0.0], nodes='control_input'),units='rad')\np.set_val('traj.phase0.controls:thrust',phase.interpolate(ys=[0.1, 0.1], nodes='control_input'),units=None) #a small amount of thrust can speed up convergence\n\n\np.run_driver()\nprint('Optimization finished')\n\n#Get optimized time series\nn = p.get_val('traj.phase0.timeseries.states:n')\nt = p.get_val('traj.phase0.timeseries.states:t')\ns = p.get_val('traj.phase0.timeseries.time')\nV = p.get_val('traj.phase0.timeseries.states:V')\nthrust = p.get_val('traj.phase0.timeseries.controls:thrust')\ndelta = p.get_val('traj.phase0.timeseries.controls:delta')\npower = p.get_val('traj.phase0.timeseries.power', units='W')\n\n#Plotting\nif plot:\n\tprint(\"Plotting\")\n\n\n\t#We know the optimal distance from the centerline (n). To transform this into the racing line we fit a spline to the displaced points. 
This will let us plot the racing line in x/y coordinates\n\ttrackLength = track.getTotalLength()\n\tnormals = getGateNormals(finespline,slope)\n\tnewgates = []\n\tnewnormals = []\n\tnewn = []\n\tfor i in range(len(n)):\n\t\tindex = ((s[i]/s_final)*np.array(finespline).shape[1]).astype(int) #interpolation to find the appropriate index\n\t\tif index[0]==np.array(finespline).shape[1]:\n\t\t\tindex[0] = np.array(finespline).shape[1]-1\n\t\tif i>0 and s[i] == s[i-1]:\n\t\t\tcontinue\n\t\telse:\n\t\t\tnewgates.append([finespline[0][index[0]],finespline[1][index[0]]])\n\t\t\tnewnormals.append(normals[index[0]])\n\t\t\tnewn.append(n[i][0])\n\n\tnewgates = reverseTransformGates(newgates)\n\tdisplacedGates = setGateDisplacements(newn,newgates,newnormals)\n\tdisplacedGates = np.array((transformGates(displacedGates)))\n\n\tnpoints = 1000\n\tdisplacedSpline,gates,gatesd,curv,slope = getSpline(displacedGates,1/npoints,0) #fit the racing line spline to npoints\n\n\tplt.rcParams.update({'font.size': 12})\n\n\n\tdef plotTrackWithData(state,s):\n\t\t#this function plots the track\n\t\tstate = np.array(state)[:,0]\n\t\ts = np.array(s)[:,0]\n\t\ts_new = np.linspace(0,s_final,npoints)\n\n\t\t#Colormap and norm of the track plot\n\t\tcmap = mpl.cm.get_cmap('viridis')\n\t\tnorm = mpl.colors.Normalize(vmin=np.amin(state),vmax=np.amax(state))\n\n\t\tfig, ax = plt.subplots(figsize=(15,6))\n\t\tplt.plot(displacedSpline[0],displacedSpline[1],linewidth=0.1,solid_capstyle=\"butt\") #establishes the figure axis limits needed for plotting the track below\n\n\t\tplt.axis('equal')\n\t\tplt.plot(finespline[0],finespline[1],'k',linewidth=linewidth_from_data_units(8.5,ax),solid_capstyle=\"butt\") #the linewidth is set in order to match the width of the track\n\t\tplt.plot(finespline[0],finespline[1],'w',linewidth=linewidth_from_data_units(8,ax),solid_capstyle=\"butt\") #8 is the width, and the 8.5 wide line draws 'kerbs'\n\t\tplt.xlabel('x (m)')\n\t\tplt.ylabel('y (m)')\n\n\t\t#plot spline with color\n\t\tfor i in range(1,len(displacedSpline[0])):\n\t\t\ts_spline = s_new[i]\n\t\t\tindex_greater = np.argwhere(s>=s_spline)[0][0]\n\t\t\tindex_less = np.argwhere(s<s_spline)[-1][0]\n\n\t\t\tx = s_spline\n\t\t\txp = np.array([s[index_less],s[index_greater]])\n\t\t\tfp = np.array([state[index_less],state[index_greater]])\n\t\t\tinterp_state = np.interp(x,xp,fp) #interpolate the given state to calculate the color\n\n\t\t\t#calculate the appropriate color\n\t\t\tstate_color = norm(interp_state)\n\t\t\tcolor = cmap(state_color)\n\t\t\tcolor = mpl.colors.to_hex(color)\n\n\t\t\t#the track plot consists of thousands of tiny lines:\n\t\t\tpoint = [displacedSpline[0][i],displacedSpline[1][i]]\n\t\t\tprevpoint = [displacedSpline[0][i-1],displacedSpline[1][i-1]]\n\t\t\tif i <=5 or i == len(displacedSpline[0])-1:\n\t\t\t\tplt.plot([point[0],prevpoint[0]],[point[1],prevpoint[1]],color,linewidth=linewidth_from_data_units(1.5,ax),solid_capstyle=\"butt\",antialiased=True)\n\t\t\telse:\n\t\t\t\tplt.plot([point[0],prevpoint[0]],[point[1],prevpoint[1]],color,linewidth=linewidth_from_data_units(1.5,ax),solid_capstyle=\"projecting\",antialiased=True)\n\n\t\tclb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm,cmap=cmap),fraction = 0.02, pad=0.04) #add colorbar\n\n\t\tif np.array_equal(state,V[:,0]):\n\t\t\tclb.set_label('Velocity (m/s)')\n\t\telif np.array_equal(state,thrust[:,0]):\n\t\t\tclb.set_label('Thrust')\n\t\telif np.array_equal(state,delta[:,0]):\n\t\t\tclb.set_label('Delta')\n\n\t\tplt.tight_layout()\n\t\tplt.grid()\n\n\t#Create the 
plots\n\tplotTrackWithData(V,s)\n\tplotTrackWithData(thrust,s)\n\tplotTrackWithData(delta,s)\n\n\n\t#Plot the main vehicle telemetry\n\tfig, axes = plt.subplots(nrows=4, ncols=1, figsize=(15, 8))\n\n\t#Velocity vs s\n\taxes[0].plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.states:V'), label='solution')\n\n\taxes[0].set_xlabel('s (m)')\n\taxes[0].set_ylabel('V (m/s)')\n\taxes[0].grid()\n\taxes[0].set_xlim(0,s_final)\n\n\t#n vs s\n\taxes[1].plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.states:n', units='m'), label='solution')\n\n\taxes[1].set_xlabel('s (m)')\n\taxes[1].set_ylabel('n (m)')\n\taxes[1].grid()\n\taxes[1].set_xlim(0,s_final)\n\n\t#throttle vs s\n\taxes[2].plot(s,thrust)\n\n\taxes[2].set_xlabel('s (m)')\n\taxes[2].set_ylabel('thrust')\n\taxes[2].grid()\n\taxes[2].set_xlim(0,s_final)\n\n\t#delta vs s\n\taxes[3].plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.controls:delta', units=None), label='solution')\n\n\taxes[3].set_xlabel('s (m)')\n\taxes[3].set_ylabel('delta')\n\taxes[3].grid()\n\taxes[3].set_xlim(0,s_final)\n\n\tplt.tight_layout()\n\n\n\t#Performance constraint plot. Tire friction and power constraints\n\tfig, axes = plt.subplots(nrows=1, ncols=1, figsize=(15, 4))\n\tplt.subplots_adjust(right=0.82,bottom=0.14,top=0.97,left=0.07)\n\n\taxes.plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.c_fl', units=None), label='c_fl')\n\taxes.plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.c_fr', units=None), label='c_fr')\n\taxes.plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.c_rl', units=None), label='c_rl')\n\taxes.plot(s,\n\t\t\tp.get_val('traj.phase0.timeseries.c_rr', units=None), label='c_rr')\n\n\taxes.plot(s,power/pmax,label='Power')\n\n\taxes.legend(bbox_to_anchor=(1.04,0.5), loc=\"center left\")\n\taxes.set_xlabel('s (m)')\n\taxes.set_ylabel('Performance constraints')\n\taxes.grid()\n\taxes.set_xlim(0,s_final)\n\n\tplt.show()", "import unittest\nimport numpy as np\nimport openmdao.api as om\n\n\nclass BrachistochroneODE(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare('num_nodes', types=int)\n\n def setup(self):\n nn = self.options['num_nodes']\n\n # Inputs\n self.add_input('v', val=np.zeros(nn), desc='velocity', units='m/s')\n\n self.add_input('g', val=9.80665 * np.ones(nn), desc='grav. 
acceleration', units='m/s/s')\n\n self.add_input('theta', val=np.zeros(nn), desc='angle of wire', units='rad')\n\n self.add_output('xdot', val=np.zeros(nn), desc='velocity component in x', units='m/s')\n\n self.add_output('ydot', val=np.zeros(nn), desc='velocity component in y', units='m/s')\n\n self.add_output('vdot', val=np.zeros(nn), desc='acceleration magnitude', units='m/s**2')\n\n self.add_output('check', val=np.zeros(nn), desc='check solution: v/sin(theta) = constant',\n units='m/s')\n\n # Setup partials\n arange = np.arange(self.options['num_nodes'])\n\n self.declare_partials(of='vdot', wrt='g', rows=arange, cols=arange)\n self.declare_partials(of='vdot', wrt='theta', rows=arange, cols=arange)\n\n self.declare_partials(of='xdot', wrt='v', rows=arange, cols=arange)\n self.declare_partials(of='xdot', wrt='theta', rows=arange, cols=arange)\n\n self.declare_partials(of='ydot', wrt='v', rows=arange, cols=arange)\n self.declare_partials(of='ydot', wrt='theta', rows=arange, cols=arange)\n\n self.declare_partials(of='check', wrt='v', rows=arange, cols=arange)\n self.declare_partials(of='check', wrt='theta', rows=arange, cols=arange)\n\n def compute(self, inputs, outputs):\n theta = inputs['theta']\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n g = inputs['g']\n v = inputs['v']\n\n outputs['vdot'] = g * cos_theta\n outputs['xdot'] = v * sin_theta\n outputs['ydot'] = -v * cos_theta\n outputs['check'] = v / sin_theta\n\n def compute_partials(self, inputs, jacobian):\n theta = inputs['theta']\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n g = inputs['g']\n v = inputs['v']\n\n jacobian['vdot', 'g'] = cos_theta\n jacobian['vdot', 'theta'] = -g * sin_theta\n\n jacobian['xdot', 'v'] = sin_theta\n jacobian['xdot', 'theta'] = v * cos_theta\n\n jacobian['ydot', 'v'] = -cos_theta\n jacobian['ydot', 'theta'] = v * sin_theta\n\n jacobian['check', 'v'] = 1 / sin_theta\n jacobian['check', 'theta'] = -v * cos_theta / sin_theta**2\n\n\nclass TestBrachistochroneUndecoratedODE(unittest.TestCase):\n\n def test_brachistochrone_undecorated_ode_gl(self):\n import numpy as np\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import openmdao.api as om\n from openmdao.utils.assert_utils import assert_near_equal\n import dymos as dm\n\n p = om.Problem(model=om.Group())\n p.driver = om.ScipyOptimizeDriver()\n\n phase = dm.Phase(ode_class=BrachistochroneODE, transcription=dm.GaussLobatto(num_segments=10))\n\n p.model.add_subsystem('phase0', phase)\n\n phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10), units='s')\n\n phase.add_state('x', fix_initial=True, fix_final=True, rate_source='xdot', units='m')\n phase.add_state('y', fix_initial=True, fix_final=True, rate_source='ydot', units='m')\n phase.add_state('v', fix_initial=True, rate_source='vdot', targets=['v'], units='m/s')\n\n phase.add_control('theta', units='deg', rate_continuity=False, lower=0.01, upper=179.9, targets=['theta'])\n\n phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665, targets=['g'])\n\n # Minimize time at the end of the phase\n phase.add_objective('time', loc='final', scaler=10)\n\n p.model.linear_solver = om.DirectSolver()\n\n p.setup()\n\n p['phase0.t_initial'] = 0.0\n p['phase0.t_duration'] = 2.0\n\n p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')\n p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')\n p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')\n p['phase0.controls:theta'] = 
phase.interpolate(ys=[5, 100.5], nodes='control_input')\n\n # Solve for the optimal trajectory\n p.run_driver()\n\n # Test the results\n assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)\n\n def test_brachistochrone_undecorated_ode_radau(self):\n import numpy as np\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import openmdao.api as om\n from openmdao.utils.assert_utils import assert_near_equal\n import dymos as dm\n\n p = om.Problem(model=om.Group())\n p.driver = om.ScipyOptimizeDriver()\n\n phase = dm.Phase(ode_class=BrachistochroneODE, transcription=dm.Radau(num_segments=10))\n\n p.model.add_subsystem('phase0', phase)\n\n phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10), units='s')\n\n phase.add_state('x', fix_initial=True, fix_final=True, rate_source='xdot', units='m')\n phase.add_state('y', fix_initial=True, fix_final=True, rate_source='ydot', units='m')\n phase.add_state('v', fix_initial=True, rate_source='vdot', targets=['v'], units='m/s')\n\n phase.add_control('theta', units='deg', rate_continuity=False, lower=0.01, upper=179.9, targets=['theta'])\n\n phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665, targets=['g'])\n\n # Minimize time at the end of the phase\n phase.add_objective('time', loc='final', scaler=10)\n\n p.model.linear_solver = om.DirectSolver()\n\n p.setup()\n\n p['phase0.t_initial'] = 0.0\n p['phase0.t_duration'] = 2.0\n\n p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')\n p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')\n p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')\n p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100.5], nodes='control_input')\n\n # Solve for the optimal trajectory\n p.run_driver()\n\n # Test the results\n assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)\n\n def test_brachistochrone_undecorated_ode_rk(self):\n import numpy as np\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import openmdao.api as om\n from openmdao.utils.assert_utils import assert_near_equal\n import dymos as dm\n\n p = om.Problem(model=om.Group())\n p.driver = om.ScipyOptimizeDriver()\n\n phase = dm.Phase(ode_class=BrachistochroneODE, transcription=dm.RungeKutta(num_segments=20))\n\n p.model.add_subsystem('phase0', phase)\n\n phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10), units='s')\n\n phase.add_state('x', fix_initial=True, rate_source='xdot', units='m')\n phase.add_state('y', fix_initial=True, rate_source='ydot', units='m')\n phase.add_state('v', fix_initial=True, rate_source='vdot', targets=['v'], units='m/s')\n\n phase.add_control('theta', units='deg', rate_continuity=False, lower=0.01, upper=179.9, targets=['theta'])\n\n phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665, targets=['g'])\n\n phase.add_boundary_constraint('x', loc='final', equals=10)\n phase.add_boundary_constraint('y', loc='final', equals=5)\n\n # Minimize time at the end of the phase\n phase.add_objective('time', loc='final', scaler=10)\n\n p.model.linear_solver = om.DirectSolver()\n\n p.setup()\n\n p['phase0.t_initial'] = 0.0\n p['phase0.t_duration'] = 2.0\n\n p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')\n p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')\n p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')\n p['phase0.controls:theta'] = 
phase.interpolate(ys=[5, 100.5], nodes='control_input')\n\n # Solve for the optimal trajectory\n p.run_driver()\n\n # Test the results\n assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)\n\n\nclass TestBrachistochroneBasePhaseClass(unittest.TestCase):\n\n def test_brachistochrone_base_phase_class_gl(self):\n import openmdao.api as om\n from openmdao.utils.assert_utils import assert_near_equal\n import dymos as dm\n\n class BrachistochronePhase(dm.Phase):\n\n def setup(self):\n\n self.options['ode_class'] = BrachistochroneODE\n self.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10), units='s')\n self.add_state('x', fix_initial=True, rate_source='xdot', units='m')\n self.add_state('y', fix_initial=True, rate_source='ydot', units='m')\n self.add_state('v', fix_initial=True, rate_source='vdot', targets=['v'],\n units='m/s')\n self.add_control('theta', units='deg', rate_continuity=False,\n lower=0.01, upper=179.9, targets=['theta'])\n self.add_parameter('g', units='m/s**2', opt=False, val=9.80665,\n targets=['g'])\n\n super(BrachistochronePhase, self).setup()\n\n p = om.Problem(model=om.Group())\n p.driver = om.ScipyOptimizeDriver()\n\n phase = BrachistochronePhase(transcription=dm.GaussLobatto(num_segments=20, order=3))\n p.model.add_subsystem('phase0', phase)\n\n phase.add_boundary_constraint('x', loc='final', equals=10)\n phase.add_boundary_constraint('y', loc='final', equals=5)\n\n # Minimize time at the end of the phase\n phase.add_objective('time', loc='final', scaler=10)\n\n p.model.linear_solver = om.DirectSolver()\n\n p.setup()\n\n p['phase0.t_initial'] = 0.0\n p['phase0.t_duration'] = 2.0\n\n p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')\n p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')\n p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')\n p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100.5], nodes='control_input')\n\n # Solve for the optimal trajectory\n p.run_driver()\n\n # Test the results\n assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)\n\n exp_out = phase.simulate()\n\n assert_near_equal(exp_out.get_val('phase0.timeseries.states:x')[-1], 10, tolerance=1.0E-3)\n assert_near_equal(exp_out.get_val('phase0.timeseries.states:y')[-1], 5, tolerance=1.0E-3)\n" ]
[ [ "numpy.amax", "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.tight_layout", "matplotlib.cm.ScalarMappable", "numpy.interp", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots_adjust", "numpy.amin", "matplotlib.colors.to_hex", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array_equal", "matplotlib.pyplot.subplots", "numpy.argwhere", "matplotlib.pyplot.grid", "matplotlib.cm.get_cmap", "matplotlib.pyplot.xlabel" ], [ "numpy.arange", "matplotlib.use", "numpy.cos", "numpy.sin", "numpy.ones", "numpy.zeros" ] ]